gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# The MIT License (MIT)
#
# Copyright (c) 2014 Autodesk, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# http://opensource.org/licenses/MIT
import json
import re
import operator
import zlib
import base64
import os
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render, get_object_or_404, get_list_or_404
from django.template import RequestContext
from survey.models import Question, Survey, SurveyMembership, Experiment, ExperimentUser, ExperimentAnswer
from survey.forms import WorkerIDForm
#from mimic.settings import local as settings
from django.conf import settings
if settings.MIMIC_USE_AZURE_BLOB:
from azure.storage import *
#-----------------
# Decorators
#-----------------
def desktop_only(f):
    """View decorator that redirects mobile/touch devices to 'not_supported'.

    The flags are attributes set on the request by the mobile-detection
    middleware — presumably django-mobility-style; confirm against settings.
    """
    # All device flags that disqualify a client from taking the survey.
    _MOBILE_FLAGS = (
        'is_android_device', 'is_kindle_device', 'is_ios5_device',
        'is_ios_device', 'is_touch_device', 'is_windows_phone_device',
        'is_webos',
    )
    def wrap(request, *args, **kwargs):
        if any(getattr(request, flag) for flag in _MOBILE_FLAGS):
            return redirect(reverse('not_supported'))
        return f(request, *args, **kwargs)
    wrap.__doc__ = f.__doc__
    wrap.__name__ = f.__name__
    return wrap
#-----------------
# Helper Functions
#-----------------
def get_active_survey():
    """Return the first active Survey, or the falsy sentinel 0 if none.

    NOTE(review): callers only truth-test the result, so the 0 sentinel is
    kept for backward compatibility. Queryset truthiness replaces len(),
    which forced a full fetch just to count.
    """
    surveys = Survey.objects.filter(active=True)
    if not surveys:
        return 0
    return surveys[0]
def create_experiment_session(request, worker_id, condition, survey):
    """Fetch or create the Experiment for *worker_id* and bind it to the session.

    Returns (experiment, user), or (None, None) when the worker already has
    multiple experiments on record or already finished one (no retakes).
    """
    try:
        # Reuse the existing user record if one exists.
        user = ExperimentUser.objects.get(worker_id=worker_id)
    except ObjectDoesNotExist:
        user = ExperimentUser.objects.create(worker_id=worker_id)
    experiments = Experiment.objects.filter(user=user)
    if len(experiments) > 1:
        # User already took an existing survey.
        return None, None
    elif len(experiments) == 1:
        experiment = experiments[0]
        if experiment.finished:
            # Already finished the experiment, cannot retake it.
            return None, None
        if not request.session.exists(request.session.session_key):
            request.session.create()
        request.session['experiment_id'] = experiment.id
        return experiment, user
    else:
        # Didn't take the survey yet: record request metadata and create it.
        HTTP_REFERER = request.META.get('HTTP_REFERER', "")
        REMOTE_HOST = request.META.get('REMOTE_HOST', "")
        # Prefer proxy-supplied client addresses over the raw socket peer.
        if 'HTTP_X_REAL_IP' in request.META:
            REMOTE_ADDR = request.META['HTTP_X_REAL_IP']
        elif 'HTTP_X_FORWARDED_FOR' in request.META:
            REMOTE_ADDR = request.META['HTTP_X_FORWARDED_FOR']
        else:
            REMOTE_ADDR = request.META.get('REMOTE_ADDR', "")
        # Snapshot all request metadata (regex-escaped) as a JSON blob.
        metaData = {v: re.escape(str(request.META[v])) for v in request.META}
        jsonMETA = json.dumps(metaData)
        if not request.session.exists(request.session.session_key):
            request.session.create()
        experiment = Experiment.objects.create(user=user,
                                               survey=survey,
                                               survey_condition=condition,
                                               session_key=request.session.session_key,
                                               remote_address=REMOTE_ADDR,
                                               remote_host=REMOTE_HOST,
                                               http_referer=HTTP_REFERER,
                                               http_user_agent=request.META['HTTP_USER_AGENT'],
                                               allMetaData=jsonMETA,
                                               finished=False)
        request.session['experiment_id'] = experiment.id
        return experiment, user
def get_questions(survey):
    """Return the survey's Question objects ordered by membership 'order'."""
    memberships = SurveyMembership.objects.filter(survey=survey).order_by('order')
    return [sm.question for sm in memberships]
#-----------------
# Views
#-----------------
# -- Errors --
# ------------
def no_active_survey(request):
    """Error page shown when no survey is currently active; clears the session."""
    template = 'errors/no_active_survey.html'
    request.session.flush()
    return render(request, template)
def not_supported(request):
    """Error page for unsupported (mobile/touch) devices; clears the session."""
    request.session.flush()
    context = RequestContext(request)
    return render(request, 'errors/not_supported.html', {}, context_instance=context)
# -- Main Views --
# ----------------
@staff_member_required
def reset(request):
    """Staff-only helper: wipe the session and restart the survey flow."""
    request.session.flush()
    return HttpResponseRedirect(reverse('survey'))
def _request_param(request, name):
    """Fetch *name* from GET, then POST, then the session; None if absent."""
    if name in request.GET:
        return request.GET[name]
    if name in request.POST:
        return request.POST[name]
    if name in request.session:
        return request.session[name]
    return None

def processWorkerIDAndExperiment(survey, request):
    """Resolve (worker_id, condition, experiment, user) for this request.

    Returns (None, None, None, None) when a required parameter is missing or
    the worker may not (re)take the survey.
    """
    if not request.session.exists(request.session.session_key):
        request.session.create()
    worker_id = _request_param(request, 'worker_id')
    if worker_id is None:
        return None, None, None, None
    request.session['worker_id'] = worker_id
    condition = _request_param(request, 'condition')
    if condition is None:
        return None, None, None, None
    request.session['condition'] = condition
    user = None
    if 'experiment_id' in request.session:
        try:
            experiment = get_object_or_404(Experiment, id=request.session['experiment_id'])
            # NOTE(review): the original compared experiment.condition, but the
            # model field is written as survey_condition at creation time (see
            # create_experiment_session); the resulting AttributeError was
            # swallowed by the bare except below, silently recreating the
            # session each time. Compare the real field — confirm against the
            # Experiment model definition.
            if experiment.survey != survey or experiment.survey_condition != condition:
                experiment, user = create_experiment_session(request, worker_id, condition, survey)
            else:
                try:
                    user = ExperimentUser.objects.get(worker_id=worker_id)
                except Exception:
                    experiment, user = create_experiment_session(request, worker_id, condition, survey)
        except Exception:
            # Stale/invalid experiment_id in the session (incl. Http404).
            experiment, user = create_experiment_session(request, worker_id, condition, survey)
    else:
        experiment, user = create_experiment_session(request, worker_id, condition, survey)
    if experiment is None or user is None:
        return None, None, None, None
    return worker_id, condition, experiment, user
def mymake_blob_url(container_name, blob_name):
    '''
    Build the public URL for a blob from Django settings.

    container_name: Name of container.
    blob_name: Name of blob.

    Unlike BlobService.make_blob_url, there are no per-call overrides:
    protocol, account name, and host base always come from
    settings.AZURE_PROTOCOL, settings.AZURE_STORAGE_ACCOUNT and
    settings.AZURE_HOST_BASE respectively.
    '''
    return '{0}://{1}{2}/{3}/{4}'.format(settings.AZURE_PROTOCOL,
                                         settings.AZURE_STORAGE_ACCOUNT,
                                         settings.AZURE_HOST_BASE,
                                         container_name,
                                         blob_name)
def saveMouseData(survey, worker_id, question_id, rawEventData):
    """Persist raw mouse-event data for one answer; return its URL/path.

    Writes to Azure blob storage when settings.MIMIC_USE_AZURE_BLOB is set,
    otherwise to a per-survey directory under MEDIA_ROOT.
    """
    # XXX need to add support for multiple blob files per answer
    url = ""
    blob_name = "ExperimentAnswer-" + str(worker_id) + "-" + str(question_id) + '.json'
    container_name = "sraw-v14-" + str(survey.slug)
    if settings.MIMIC_USE_AZURE_BLOB:
        blob_service = BlobService(account_name=settings.AZURE_STORAGE_ACCOUNT,
                                   account_key=settings.AZURE_STORAGE_KEY)
        blob_service.create_container(container_name)
        try:
            # Blob already exists: keep it and just return its URL.
            blob_service.get_blob_metadata(container_name, blob_name)
            url = mymake_blob_url(container_name, blob_name)
        except Exception:
            # Metadata lookup failed -> assume the blob is missing and upload.
            # (Narrowed from a bare except so KeyboardInterrupt propagates.)
            blob_service.put_blob(container_name, blob_name, rawEventData,
                                  x_ms_blob_type='BlockBlob')
            url = mymake_blob_url(container_name, blob_name)
    else:
        directory = os.path.join(settings.MEDIA_ROOT, container_name)
        if not os.path.exists(directory):
            os.makedirs(directory)
        url = os.path.join(container_name, blob_name)
        with open(os.path.join(settings.MEDIA_ROOT, url), 'w') as outfile:
            outfile.write(rawEventData)
    return url
# Used to save questions via json
def save_question(request):
    """AJAX endpoint persisting one question's answer and/or mouse data.

    Expects POST fields: currentQ (index), and/or answer, and/or mouseData.
    Returns a small JSON status payload.
    """
    survey = get_active_survey()
    if not survey:
        return redirect(reverse('no_active_survey'))
    worker_id, condition, experiment, user = processWorkerIDAndExperiment(survey, request)
    if worker_id is None or condition is None or experiment is None or user is None:
        return redirect(reverse('no_active_survey'))
    questions = get_questions(survey)
    if len(questions) < 1:
        return HttpResponseRedirect(reverse('no_active_survey'))
    if request.method != 'POST':
        # Previously a non-POST request fell through and returned None, which
        # makes Django raise; answer with an explicit error payload instead.
        # ('mimetype' is the pre-1.7 Django spelling; the file targets old
        # Django per the django.core.urlresolvers import.)
        return HttpResponse('{"status": "Error"}\n', mimetype="application/json")
    current_question_num = int(request.POST.get('currentQ', 0))
    current_question = questions[current_question_num]
    question_finished = False
    mouseDataLink = ""
    if 'mouseData' in request.POST:
        mouseData = request.POST['mouseData']
        mouseDataLink = saveMouseData(survey, worker_id, current_question.slug, mouseData)
        question_finished = True
    answer = ""
    if 'answer' in request.POST:
        answer = request.POST['answer']
        question_finished = True
    try:
        # Answer exists: update either the mouse-data link or the answer text.
        exp_answer = ExperimentAnswer.objects.get(question=current_question,
                                                  experiment=experiment, user=user)
        if len(mouseDataLink) > 0:
            exp_answer.mouseData = mouseDataLink
        else:
            exp_answer.answer = answer
            exp_answer.finished = question_finished
        exp_answer.save()
    except ObjectDoesNotExist:
        ExperimentAnswer.objects.create(question=current_question, experiment=experiment,
                                        user=user, mouseData=mouseDataLink,
                                        answer=answer, finished=question_finished)
    return HttpResponse('{"status": "Done"}\n', mimetype="application/json")
@desktop_only
def home(request):
    """Main survey view: serve the current question, or finish the experiment.

    Requires worker_id/condition (GET, POST, or session); mobile devices are
    rejected by @desktop_only.
    """
    survey = get_active_survey()
    if not survey:
        return redirect(reverse('no_active_survey'))
    worker_id, condition, experiment, user = processWorkerIDAndExperiment(survey, request)
    if worker_id== None or condition == None or experiment == None or user == None:
        return redirect(reverse('no_active_survey'))
    # Lets create the survey
    questions = get_questions(survey)
    error = ""
    total_questions = len(questions)
    if total_questions < 1:
        return HttpResponseRedirect(reverse('no_active_survey'))
    # Progress = number of answers already marked finished for this experiment.
    current_question_num = ExperimentAnswer.objects.filter(experiment=experiment, finished=True).count()
    debugFull = 0
    if "debugFull" in request.GET:
        debugFull = 1
    if current_question_num >= total_questions: # All validation rules pass
        # All questions answered: mark the experiment finished exactly once.
        if not experiment.finished:
            experiment.finished = True
            experiment.save()
        return HttpResponseRedirect(reverse('done')) # Redirect after POST
    current_question = questions[current_question_num]
    # NOTE: qtotal is the last question's zero-based index, not the count.
    return render(request, current_question.base_template,
                  {'error': error,
                   'user':user,
                   'worker_id':worker_id,
                   'survey':survey,
                   'question_template': current_question.template,
                   'question':current_question.data,
                   'condition': condition,
                   'debug':debugFull,
                   'qnum':current_question_num,
                   'qtotal':total_questions-1 })
def done(request):
    """Final page: show the completion code once the experiment is finished.

    Uses `in` membership instead of the deprecated dict.has_key.
    """
    if 'worker_id' in request.session:
        worker_id = request.session['worker_id']
        user = ExperimentUser.objects.get(worker_id=worker_id)
    else:
        return HttpResponseRedirect(reverse('no_active_survey'))
    if 'experiment_id' in request.session:
        experiment_id = request.session['experiment_id']
    else:
        return HttpResponseRedirect(reverse('no_active_survey'))
    experiment = get_object_or_404(Experiment, id=experiment_id)
    survey = experiment.survey
    if not experiment.finished:
        # Not done yet: send the worker back into the survey flow.
        return HttpResponseRedirect(reverse('survey'))
    # Completion code shown to the worker (survey code + user id).
    survey_code = survey.survey_code + str(user.id)
    request.session.flush()
    return render(request, 'done.html', {'survey_code': survey_code},
                  context_instance=RequestContext(request))
| |
from dal import autocomplete
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, HTML
from django import forms
from django.urls import reverse, reverse_lazy
from demoslogic.blockobjects.forms import VoteForm, SearchForm
from . import models, settings
class NewPremiseForm(forms.ModelForm):
    """Step-1 form: pick which premise type to create (submitted via GET)."""
    class Meta:
        model = models.Premise
        fields = ['premise_type']
        labels = {'premise_type': "Which type?"}
    def __init__(self, *args, **kwargs):
        super(NewPremiseForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # GET so the chosen type lands in the query string of the create view.
        self.helper.form_method = 'get'
        self.helper.form_action = reverse_lazy('premises:create')
        self.helper.add_input(Submit('submit', 'Choose'))
class SearchPremiseForm(SearchForm):
    """Autocomplete search box over all Premise objects."""
    search_id = forms.ModelChoiceField(label = "Find QStatement:", queryset =models.Premise.objects.all(),
                                       widget = autocomplete.ModelSelect2(
                                           url = 'premises:autocomplete',
                                           attrs = {'data-minimum-input-length': 0}))
class SearchNounForm(SearchForm):
    """Autocomplete search box over all Noun objects."""
    search_id = forms.ModelChoiceField(label = "Find Entity:", queryset=models.Noun.objects.all(),
                                       widget=autocomplete.ModelSelect2(
                                           url = 'premises:nouns_autocomplete',
                                           attrs = {'data-minimum-input-length': 0}))
class SearchVerbForm(SearchForm):
    """Autocomplete search box over all Verb objects."""
    search_id = forms.ModelChoiceField(label = "Find Verb:", queryset=models.Verb.objects.all(),
                                       widget=autocomplete.ModelSelect2(
                                           url = 'premises:verbs_autocomplete',
                                           attrs = {'data-minimum-input-length': 0}))
class SearchAdjectiveForm(SearchForm):
    """Autocomplete search box over all Adjective objects."""
    search_id = forms.ModelChoiceField(label = "Find Attribute:", queryset=models.Adjective.objects.all(),
                                       widget=autocomplete.ModelSelect2(
                                           url = 'premises:adjectives_autocomplete',
                                           attrs = {'data-minimum-input-length': 0}))
class PremiseVoteForm(VoteForm):
    """Vote form whose choice lists come from the premise being voted on."""
    def __init__(self, *args, **kwargs):
        # Renamed the local from 'object' so it no longer shadows the builtin;
        # callers still pass the kwarg under the key 'object'.
        premise = kwargs.pop('object', None)
        super(PremiseVoteForm, self).__init__(*args, **kwargs)  # loads form helper
        if premise:
            self.fields['value'].initial = None
            self.fields['value'].choices = premise.get_theses_choices()
            self.fields['value2'].initial = None
            self.fields['value2'].choices = premise.get_demands_choices()
    class Meta:
        model = models.PremiseVote
        fields = ['value', 'value2']
        widgets = {'value': forms.RadioSelect, 'value2': forms.RadioSelect}
        labels = {'value': "Which of these statements do you think gets closest to the truth?",
                  'value2': "Which of these statements reflects your opinion most?"}
class PremiseCreateForm(forms.ModelForm):
    """Base create form; each concrete subclass fixes `premise_type`."""
    class Meta:
        model = models.Premise
        fields = ['premise_type']
    def clean(self):
        """Reject premises whose subject and object are the same thing."""
        cleaned_data = super(PremiseCreateForm, self).clean()
        # Dead code removed: premise_type was read from cleaned_data, then
        # immediately overwritten from self, and never used afterwards.
        if cleaned_data.get("key_subject", None) == cleaned_data.get("key_object", None):
            raise forms.ValidationError("Same thing twice? Try harder :).")
        return cleaned_data
    def clean_doublets(self, cd):
        """Raise ValidationError if an identical premise already exists.

        NOTE(review): not invoked from clean() in this file — presumably
        called by subclasses or views; confirm before removing.
        """
        premises = models.Premise.objects.filter(premise_type = cd.get("premise_type")) \
                                  .filter(key_subject = cd.get("key_subject", None)) \
                                  .filter(key_object = cd.get("key_object", None)) \
                                  .filter(key_indirect_object = cd.get("key_indirect_object", None)) \
                                  .filter(key_complement = cd.get("key_complement", None)) \
                                  .filter(key_predicate = cd.get("key_predicate", None))
        if premises.count():
            raise forms.ValidationError("This premise already exists.")
        else:
            return cd
    def __init__(self, *args, **kwargs):
        super(PremiseCreateForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'blueForms'
        self.helper.form_method = 'post'
        self.helper.add_input(Submit('submit', 'Submit'))
        # premise_type is fixed per subclass: show it read-only and bounce
        # back to the type chooser if it is changed client-side.
        self.fields['premise_type'].initial = self.premise_type
        self.fields['premise_type'].label = 'Type of statement:'
        self.fields['premise_type'].widget.attrs['readonly'] = 'readonly'
        self.fields['premise_type'].widget.attrs['onChange'] = "window.location='" + reverse("premises:new") + "'"
class CategorizationCreateForm(PremiseCreateForm):
    """Create form for 'X is a type of Y' premises."""
    premise_type = settings.TYPE_CATEGORIZATION
    class Meta(PremiseCreateForm.Meta):
        fields = ['premise_type', 'key_subject', 'key_object']
        widgets = {
            'key_subject': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                     forward = ['key_object']),
            'key_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                    forward = ['key_subject']),
        }
        labels = {
            'key_subject': '',
            'key_object': 'is/should (not) be a type of',
        }
class CollectionCreateForm(PremiseCreateForm):
    """Create form for 'X comprises Y' premises."""
    premise_type = settings.TYPE_COLLECTION
    class Meta(PremiseCreateForm.Meta):
        fields = ['premise_type', 'key_subject', 'key_object']
        widgets = {
            'key_subject': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                     forward = ['key_object']),
            'key_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                    forward = ['key_subject']),
        }
        labels = {
            'key_subject': '',
            'key_object': 'does/should not/partly/exclusively comprise',
        }
class ComparisonCreateForm(PremiseCreateForm):
    """Create form for 'X is more/less C than Y (for Z)' premises."""
    premise_type = settings.TYPE_COMPARISON
    class Meta(PremiseCreateForm.Meta):
        fields = ['premise_type', 'key_subject', 'key_complement', 'key_object', 'key_indirect_object']
        widgets = {
            'key_subject': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                     forward = ['key_object']),
            'key_complement': autocomplete.ModelSelect2(url = 'premises:adjectives_autocomplete_create'),
            'key_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                    forward = ['key_subject']),
            'key_indirect_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create'),
        }
        labels = {
            'key_subject': '',
            'key_complement': 'is/should be equally/less/more',
            'key_object': 'as/than',
            'key_indirect_object': 'for (the) [optional]'
        }
class RelationCreateForm(PremiseCreateForm):
    """Create form for 'more X means more/less Y (for Z)' premises."""
    premise_type = settings.TYPE_RELATION
    class Meta(PremiseCreateForm.Meta):
        fields = ['premise_type', 'key_subject', 'key_object', 'key_indirect_object']
        widgets = {
            'key_subject': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                     forward = ['key_object']),
            'key_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                    forward = ['key_subject']),
            'key_indirect_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create'),
        }
        labels = {
            'key_subject': 'When there is more',
            'key_object': ', there is/should be more/less/no change in',
            'key_indirect_object': 'for (the) [optional]'
        }
class QuantityCreateForm(PremiseCreateForm):
    """Create form for 'there is more/less X (for Z)' premises."""
    premise_type = settings.TYPE_QUANTITY
    class Meta(PremiseCreateForm.Meta):
        fields = ['premise_type', 'key_subject', 'key_indirect_object']
        widgets = {
            'key_subject': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                     forward = ['key_indirect_object']),
            'key_indirect_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                             forward = ['key_subject']),
        }
        labels = {
            'key_subject': 'There is (no)/might be or should be more/less/an equal amount of',
            'key_indirect_object': 'for (the) [optional]'
        }
class EncouragmentCreateForm(PremiseCreateForm):
    """Create form for encouragement/discouragement premises.

    NOTE: class name keeps the original spelling ('Encouragment') because
    external code may import it by that name.
    """
    premise_type = settings.TYPE_ENCOURAGEMENT
    class Meta(PremiseCreateForm.Meta):
        fields = ['premise_type', 'key_subject', 'key_indirect_object']
        widgets = {
            'key_subject': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                     forward = ['key_indirect_object']),
            'key_indirect_object': autocomplete.ModelSelect2(url = 'premises:nouns_autocomplete_create',
                                                             forward = ['key_subject']),
        }
        labels = {
            'key_subject': 'The following is/should be encouraged/discouraged/left alone:',
            'key_indirect_object': 'for (the) [optional]'
        }
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

  Args:
    f: A file object that can be passed into a gzip reader.

  Returns:
    data: A 4D uint8 numpy array [index, y, x, depth].

  Raises:
    ValueError: If the bytestream does not start with 2051.
  """
  print('Extracting', f.name)
  be32 = numpy.dtype(numpy.uint32).newbyteorder('>')
  def read_u32(stream):
    # Big-endian uint32, as stored in the IDX file header.
    return numpy.frombuffer(stream.read(4), dtype=be32)[0]
  with gzip.GzipFile(fileobj=f) as bytestream:
    magic = read_u32(bytestream)
    if magic != 2051:
      raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                       (magic, f.name))
    num_images = read_u32(bytestream)
    rows = read_u32(bytestream)
    cols = read_u32(bytestream)
    pixel_bytes = bytestream.read(rows * cols * num_images)
    images = numpy.frombuffer(pixel_bytes, dtype=numpy.uint8)
    return images.reshape(num_images, rows, cols, 1)
def dense_to_one_hot(labels_dense, num_classes):
  """Convert class labels from scalars to one-hot vectors."""
  count = labels_dense.shape[0]
  one_hot = numpy.zeros((count, num_classes))
  # Fancy indexing: set column `label` to 1 in each row.
  one_hot[numpy.arange(count), labels_dense.ravel()] = 1
  return one_hot
def extract_labels(f, one_hot=False, num_classes=10):
  """Extract the labels into a 1D uint8 numpy array [index].

  Args:
    f: A file object that can be passed into a gzip reader.
    one_hot: Does one hot encoding for the result.
    num_classes: Number of classes for the one hot encoding.

  Returns:
    labels: a 1D uint8 numpy array.

  Raises:
    ValueError: If the bytestream doesn't start with 2049.
  """
  print('Extracting', f.name)
  be32 = numpy.dtype(numpy.uint32).newbyteorder('>')
  def read_u32(stream):
    # Big-endian uint32, as stored in the IDX file header.
    return numpy.frombuffer(stream.read(4), dtype=be32)[0]
  with gzip.GzipFile(fileobj=f) as bytestream:
    magic = read_u32(bytestream)
    if magic != 2049:
      raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                       (magic, f.name))
    num_items = read_u32(bytestream)
    raw = bytestream.read(num_items)
    labels = numpy.frombuffer(raw, dtype=numpy.uint8)
  if one_hot:
    return dense_to_one_hot(labels, num_classes)
  return labels
class DataSet(object):
  """In-memory dataset split with shuffled mini-batch iteration."""

  def __init__(self,
               images,
               labels,
               fake_data=False,
               one_hot=False,
               dtype=dtypes.float32,
               reshape=True):
    """Construct a DataSet.
    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      if reshape:
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
      if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    # Epoch bookkeeping for next_batch().
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False, shuffle=True):
    """Return the next `batch_size` examples from this data set.

    Reshuffles at each epoch boundary when `shuffle` is True; a batch that
    straddles an epoch boundary is stitched from the tail of the old order
    and the head of the new one.
    """
    if fake_data:
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)
      ]
    start = self._index_in_epoch
    # Shuffle for the first epoch
    if self._epochs_completed == 0 and start == 0 and shuffle:
      perm0 = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm0)
      self._images = self.images[perm0]
      self._labels = self.labels[perm0]
    # Go to the next epoch
    if start + batch_size > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Get the rest examples in this epoch
      rest_num_examples = self._num_examples - start
      images_rest_part = self._images[start:self._num_examples]
      labels_rest_part = self._labels[start:self._num_examples]
      # Shuffle the data
      if shuffle:
        perm = numpy.arange(self._num_examples)
        numpy.random.shuffle(perm)
        self._images = self.images[perm]
        self._labels = self.labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size - rest_num_examples
      end = self._index_in_epoch
      images_new_part = self.images[start:end]
      labels_new_part = self.labels[start:end]
      return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
    else:
      self._index_in_epoch += batch_size
      end = self._index_in_epoch
      return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True,
                   validation_size=5000):
  """Download (if needed) and load MNIST, returning train/validation/test."""
  if fake_data:
    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
    return base.Datasets(train=fake(), validation=fake(), test=fake())

  def _fetch(filename, extractor, **kw):
    # Download the archive once into train_dir, then parse it.
    path = base.maybe_download(filename, train_dir, SOURCE_URL + filename)
    with open(path, 'rb') as f:
      return extractor(f, **kw)

  train_images = _fetch('train-images-idx3-ubyte.gz', extract_images)
  train_labels = _fetch('train-labels-idx1-ubyte.gz', extract_labels,
                        one_hot=one_hot)
  test_images = _fetch('t10k-images-idx3-ubyte.gz', extract_images)
  test_labels = _fetch('t10k-labels-idx1-ubyte.gz', extract_labels,
                       one_hot=one_hot)

  if not 0 <= validation_size <= len(train_images):
    raise ValueError(
        'Validation size should be between 0 and {}. Received: {}.'
        .format(len(train_images), validation_size))

  # Carve the validation split off the front of the training set.
  validation_images = train_images[:validation_size]
  validation_labels = train_labels[:validation_size]
  train_images = train_images[validation_size:]
  train_labels = train_labels[validation_size:]

  train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)
  validation = DataSet(validation_images, validation_labels, dtype=dtype,
                       reshape=reshape)
  test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)
  return base.Datasets(train=train, validation=validation, test=test)
def load_mnist(train_dir='MNIST-data'):
  """Convenience wrapper: load MNIST from *train_dir* with default options."""
  return read_data_sets(train_dir=train_dir)
| |
import os
import time
from copy import copy
from datetime import datetime
from optparse import make_option
from paramiko import SSHException
from django_backup.utils import (
GOOD_RSYNC_FLAG,
TIME_FORMAT,
decide_remove,
is_db_backup,
is_media_backup,
is_backup,
BaseBackupCommand,
DATABASE_ENGINES
)
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import EmailMessage
from django.conf import settings
from django.db import connection
# Based on: http://www.djangosnippets.org/snippets/823/
# Based on: http://www.yashh.com/blog/2008/sep/05/django-database-backup-view/
class Command(BaseBackupCommand):
options = BaseBackupCommand.options + (
make_option(
'--email',
default=None, dest='email',
help='Sends email with attached dump file'
),
make_option(
'--ftp', '-f',
action='store_true', default=False, dest='ftp',
help='Backup file via FTP'
),
make_option(
'--compress', '-c',
action='store_true', default=False, dest='compress',
help='Compress dump file'
),
make_option(
'--directory', '-d',
action='append', default=[], dest='directories',
help='Destination Directory'
),
make_option(
'--zipencrypt', '-z',
action='store_true', default=False,
dest='zipencrypt', help='Compress and encrypt SQL dump file using zip'
),
make_option(
'--media', '-m',
action='store_true', default=False, dest='media',
help='Backup media dir'
),
make_option(
'--rsync', '-r',
action='store_true', default=False, dest='rsync',
help='Backup media dir with rsync'
),
make_option(
'--cleandb',
action='store_true', default=False, dest='clean_db',
help='Clean up surplus database backups'
),
make_option(
'--cleanmedia',
action='store_true', default=False, dest='clean_media',
help='Clean up surplus media backups'
),
make_option(
'--cleanrsync',
action='store_true', default=False, dest='clean_rsync',
help='Clean up broken rsync backups'
),
make_option(
'--nolocal',
action='store_true', default=False, dest='no_local',
help='Reserve local backup or not'
),
make_option(
'--deletelocal',
action='store_true', default=False, dest='delete_local',
help='Delete all local backups'
),
make_option(
'--cleanlocaldb',
action='store_true', default=False, dest='clean_local_db',
help='Clean up surplus local database backups'
),
make_option(
'--cleanremotedb',
action='store_true', default=False, dest='clean_remote_db',
help='Clean up surplus remote database backups'
),
make_option(
'--cleanlocalmedia',
action='store_true', default=False, dest='clean_local_media',
help='Clean up surplus local media backups'
),
make_option(
'--cleanremotemedia',
action='store_true', default=False, dest='clean_remote_media',
help='Clean up surplus remote media backups'
),
make_option(
'--cleanlocalrsync',
action='store_true', default=False, dest='clean_local_rsync',
help='Clean up local broken rsync backups'
),
make_option(
'--cleanremotersync',
action='store_true', default=False, dest='clean_remote_rsync',
help='Clean up remote broken rsync backups'
),
make_option(
'--application', '-a',
action='append', default=[], dest='apps',
help='Optionally only back up certain Django apps'
),
)
help = "Backup database. Only Mysql and Postgresql engines are implemented"
    def handle(self, *args, **kwargs):
        """Entry point: run the backup, always releasing connections."""
        try:
            self._handle(*args, **kwargs)
        finally:
            # Ensure remote/DB connections are closed even when backup fails.
            self.close_connection()
def _write(self, content):
"""When stdout is lost we should stop writing content and continue the backup process. """
try:
self.stdout.write(content)
except:
pass
def _handle(self, *args, **options):
self.time_suffix = time.strftime(TIME_FORMAT)
self.email = options.get('email')
self.ftp = options.get('ftp')
self.compress = options.get('compress')
self.directories = options.get('directories')
self.zipencrypt = options.get('zipencrypt')
self.encrypt_password = os.environ.get('BACKUP_PASSWORD')
self.media = options.get('media')
self.rsync = options.get('rsync')
self.clean = options.get('clean')
self.clean_db = options.get('clean_db')
self.clean_media = options.get('clean_media')
self.clean_rsync = options.get('clean_rsync') and self.rsync # Only when rsync is True
self.clean_local_db = options.get('clean_local_db')
self.clean_remote_db = options.get('clean_remote_db')
self.clean_local_media = options.get('clean_local_media')
self.clean_remote_media = options.get('clean_remote_media')
self.clean_local_rsync = options.get('clean_local_rsync') and self.rsync # Only when rsync is True
self.clean_remote_rsync = options.get('clean_remote_rsync') and self.rsync # Only when rsync is True
self.no_local = options.get('no_local')
self.delete_local = options.get('delete_local')
self.apps = options.get('apps')
if self.zipencrypt and not self.encrypt_password:
raise CommandError(
'Please specify a password for your backup file'
' using the BACKUP_PASSWORD environment variable.'
)
if self.clean_rsync:
self._write('cleaning broken rsync backups')
self.clean_broken_rsync()
else:
if self.clean_local_rsync:
self._write('cleaning local broken rsync backups')
self.clean_local_broken_rsync()
if self.clean_remote_rsync:
self._write('cleaning remote broken rsync backups')
self.clean_remote_broken_rsync()
if self.clean_db:
self._write('cleaning surplus database backups')
self.clean_surplus_db()
if self.clean_local_db:
self._write('cleaning local surplus database backups')
self.clean_local_surplus_db()
if self.clean_remote_db:
self._write('cleaning remote surplus database backups')
self.clean_remote_surplus_db()
if self.clean_media:
self._write('cleaning surplus media backups')
self.clean_surplus_media()
if self.clean_local_media:
self._write('cleaning local surplus media backups')
self.clean_local_surplus_media()
if self.clean_remote_media:
self._write('cleaning remote surplus media backups')
self.clean_remote_surplus_media()
if not os.path.exists(self.backup_dir):
os.makedirs(self.backup_dir)
outfile = os.path.join(self.backup_dir, 'backup_%s.sql' % self.time_suffix)
# Doing backup
if self.engine in DATABASE_ENGINES['mysql']:
self._write('Doing Mysql backup to database %s into %s' % (self.db, outfile))
self.do_mysql_backup(outfile)
elif self.engine in DATABASE_ENGINES['postgresql']:
self._write('Doing Postgresql backup to database %s into %s' % (self.db, outfile))
self.do_postgresql_backup(outfile)
else:
raise CommandError('Backup in %s engine not implemented' % self.engine)
# Compressing backup
if self.compress:
compressed_outfile = outfile + '.gz'
self._write('Compressing backup file %s to %s' % (outfile, compressed_outfile))
self.do_compress(outfile, compressed_outfile)
outfile = compressed_outfile
if self.zipencrypt:
zip_encrypted_outfile = "%s.zip" % outfile
self._write('Zipping and cncrypting backup file %s to %s' % (outfile, zip_encrypted_outfile))
self.do_encrypt(outfile, zip_encrypted_outfile)
outfile = zip_encrypted_outfile
# Backing up media directories,
if self.media:
self.directories += [self.directory_to_backup]
# Backing up directories
dir_outfiles = []
if self.directories: # We need to do media backup
all_directories = ' '.join(self.directories)
self.all_directories = all_directories
if self.rsync:
self.do_media_rsync_backup()
else:
# Backup all the directories in one file.
all_outfile = os.path.join(self.backup_dir, 'dir_%s.tar.gz' % self.time_suffix)
self.compress_dir(all_directories, all_outfile)
dir_outfiles.append(all_outfile)
# Sending mail with backups
if self.email:
self._write("Sending e-mail with backups to '%s'" % self.email)
self.sendmail(settings.SERVER_EMAIL, [self.email], dir_outfiles + [outfile])
if self.ftp:
self._write("Saving to remote server")
self.store_ftp(local_files=[os.path.join(os.getcwd(), x) for x in dir_outfiles + [outfile]])
def compress_dir(self, directory, outfile):
self._write('Backup directories ...')
command = 'cd %s && tar -czf %s *' % (directory, outfile)
self._write('=' * 70)
self._write('Running Command: %s' % command)
os.system(command)
@staticmethod
def get_blacklist_tables():
"""
Exclude BACKUP_TABLES_BLACKLIST if it's defined.
"""
return getattr(settings, 'BACKUP_TABLES_BLACKLIST', [])
@staticmethod
def get_tables_for_apps(*apps):
"""
Get table names for all for the given applications.
"""
tables = connection.introspection.django_table_names(only_existing=True)
def check_table(table):
return any(table.startswith('%s_' % app) for app in apps)
return list(filter(check_table, tables))
    def store_ftp(self, local_files=None):
        """Upload *local_files* to the remote dir over SFTP.

        Afterwards, optionally remove local copies:
        --deletelocal wipes *every* local db/media backup; --nolocal removes
        only the files uploaded in this run.
        """
        self.ensure_remote_dir_exists()
        if not local_files:
            local_files = []
        sftp = self.get_connection()
        for local_file in local_files:
            filename = os.path.split(local_file)[-1]
            self._write('Saving %s to remote server ' % local_file)
            sftp.put(local_file, os.path.join(self.remote_dir or '', filename))
        if self.delete_local:
            # Remove all local backups, not just this run's files.
            backups = os.listdir(self.backup_dir)
            backups = list(filter(is_backup, backups))
            backups.sort()
            self._write('=' * 70)
            self._write('--cleanlocal, local db and media backups found: %s' % backups)
            remove_list = backups
            self._write('local db and media backups to clean %s' % remove_list)
            remove_all = ' '.join([os.path.join(self.backup_dir, i) for i in remove_list])
            if remove_all:
                self._write('=' * 70)
                self._write('cleaning up local db and media backups')
                command = 'rm -rf %s' % remove_all
                self._write('=' * 70)
                self._write('Running Command: %s' % command)
                os.system(command)
        # remote(ftp server)
        elif self.no_local:
            # Remove only what was just uploaded.
            to_remove = local_files
            self._write('=' * 70)
            self._write('--nolocal, Local files to remove %s' % to_remove)
            remove_all = ' '.join(to_remove)
            if remove_all:
                self._write('=' * 70)
                self._write('cleaning up local backups')
                command = 'rm -rf %s' % remove_all
                self._write('=' * 70)
                self._write('Running Command: %s' % command)
                os.system(command)
@staticmethod
def sendmail(address_from, addresses_to, attachments):
subject = "Your DB-backup for " + datetime.now().strftime("%d %b %Y")
body = "Timestamp of the backup is " + datetime.now().strftime("%d %b %Y")
email = EmailMessage(subject, body, address_from, addresses_to)
email.content_subtype = 'html'
for attachment in attachments:
email.attach_file(attachment)
email.send()
@staticmethod
def do_compress(infile, outfile):
os.system('gzip --stdout %s > %s' % (infile, outfile))
os.system('rm %s' % infile)
def do_encrypt(self, infile, outfile):
os.system('zip -P %s %s %s' % (self.encrypt_password, outfile, infile))
os.system('rm %s' % infile)
def do_mysql_backup(self, outfile):
if self.apps:
raise NotImplementedError("Backuping up only ceratain apps not implemented in MySQL")
args = []
if self.user:
args += ["--user='%s'" % self.user]
if self.passwd:
args += ["--password='%s'" % self.passwd]
if self.host:
args += ["--host='%s'" % self.host]
if self.port:
args += ["--port=%s" % self.port]
args += [self.db]
base_args = copy(args)
blacklist_tables = self.get_blacklist_tables()
if blacklist_tables:
all_tables = connection.introspection.get_table_list(connection.cursor())
try:
all_tables = [table.name for table in all_tables]
except AttributeError:
pass
tables = list(set(all_tables) - set(blacklist_tables))
args += tables
os.system('%s %s > %s' % (getattr(settings, 'BACKUP_SQLDUMP_PATH', 'mysqldump'), ' '.join(args), outfile))
# Append table structures of blacklist_tables
if blacklist_tables:
all_tables = connection.introspection.get_table_list(connection.cursor())
blacklist_tables = list(set(all_tables) and set(blacklist_tables))
args = base_args + ['-d'] + blacklist_tables
cmd = '%s %s >> %s' % (getattr(settings, 'BACKUP_SQLDUMP_PATH', 'mysqldump'), ' '.join(args), outfile)
os.system(cmd)
    def do_postgresql_backup(self, outfile):
        """Dump the PostgreSQL database to *outfile* with pg_dump.

        When --application was given, only tables of those apps are dumped
        via ``-t``; otherwise a full dump with ``--clean`` is produced.
        """
        args = []
        if self.user:
            args += ["--username=%s" % self.user]
        if self.host:
            args += ["--host=%s" % self.host]
        if self.port:
            args += ["--port=%s" % self.port]
        if self.db:
            args += [self.db]
        pgdump_path = getattr(settings, 'BACKUP_PG_DUMP_PATH', 'pg_dump')
        if self.passwd:
            # pg_dump has no password flag; it reads PGPASSWORD from the env.
            os.environ['PGPASSWORD'] = self.passwd
        table_args = ' '.join(
            '-t %s ' % table for table in self.get_tables_for_apps(*self.apps)
        )
        if table_args:
            # -a = data only. NOTE(review): app-limited dumps therefore skip
            # the schema -- confirm this is intended.
            table_args = '-a %s' % table_args
        pgdump_cmd = '%s %s %s > %s' % (pgdump_path, ' '.join(args), table_args or '--clean', outfile)
        self._write(pgdump_cmd)
        os.system(pgdump_cmd)
    def clean_local_surplus_db(self):
        """Delete surplus local db backups per BACKUP_DATABASE_COPIES."""
        try:
            backups = os.listdir(self.backup_dir)
            backups = list(filter(is_db_backup, backups))
            backups.sort()
            self._write('=' * 70)
            self._write('local db backups found: %s' % backups)
            # decide_remove() (defined elsewhere) picks the surplus entries
            # from the time-sorted list.
            remove_list = decide_remove(backups, settings.BACKUP_DATABASE_COPIES)
            self._write('=' * 70)
            self._write('local db backups to clean %s' % remove_list)
            remove_all = ' '.join([os.path.join(self.backup_dir, i) for i in remove_list])
            if remove_all:
                self._write('=' * 70)
                self._write('cleaning up local db backups')
                command = 'rm %s' % remove_all
                self._write('=' * 70)
                self._write('Running Command: %s' % command)
                os.system(command)
        except ImportError:
            # NOTE(review): ImportError is treated as "BACKUP_DATABASE_COPIES
            # missing" -- verify the settings access really raises it (a plain
            # missing attribute would raise AttributeError instead).
            self.stderr.write('Cleaned nothing, because BACKUP_DATABASE_COPIES is missing\n')
    def clean_remote_surplus_db(self):
        """Delete surplus remote (SFTP) db backups per BACKUP_DATABASE_COPIES."""
        self.ensure_remote_dir_exists()
        try:
            sftp = self.get_connection()
            backups = [i.strip() for i in sftp.listdir(self.remote_dir)]
            backups = list(filter(is_db_backup, backups))
            backups.sort()
            self._write('=' * 70)
            self._write('remote db backups found: %s' % backups)
            remove_list = decide_remove(backups, settings.BACKUP_DATABASE_COPIES)
            self._write('=' * 70)
            self._write('remote db backups to clean %s' % remove_list)
            if remove_list:
                self._write('=' * 70)
                self._write('cleaning up remote db backups')
                # Dump files are plain files, so sftp.remove() is sufficient
                # (unlike media backups, which may be directories).
                for file_ in remove_list:
                    target_path = os.path.join(self.remote_dir, file_)
                    self._write('Removing %s' % target_path)
                    sftp.remove(target_path)
        except ImportError:
            # NOTE(review): see clean_local_surplus_db about ImportError here.
            self.stderr.write('Cleaned nothing, because BACKUP_DATABASE_COPIES is missing\n')
    def clean_surplus_db(self):
        """Clean surplus db backups both locally and on the remote server."""
        self.clean_local_surplus_db()
        self.clean_remote_surplus_db()

    def clean_surplus_media(self):
        """Clean surplus media backups both locally and on the remote server."""
        self.clean_local_surplus_media()
        self.clean_remote_surplus_media()
    def clean_local_surplus_media(self):
        """Delete surplus local media backups per BACKUP_MEDIA_COPIES."""
        try:
            # local(web server)
            backups = os.listdir(self.backup_dir)
            backups = list(filter(is_media_backup, backups))
            backups.sort()
            self._write('=' * 70)
            self._write('local media backups found: %s' % backups)
            remove_list = decide_remove(backups, settings.BACKUP_MEDIA_COPIES)
            self._write('=' * 70)
            self._write('local media backups to clean %s' % remove_list)
            remove_all = ' '.join([os.path.join(self.backup_dir, i) for i in remove_list])
            if remove_all:
                self._write('=' * 70)
                self._write('cleaning up local media backups')
                # -rf because media backups can be rsync directories.
                command = 'rm -rf %s' % remove_all
                self._write('=' * 70)
                self._write('Running Command: %s' % command)
                os.system(command)
        except ImportError:
            # NOTE(review): ImportError assumed to signal the missing setting;
            # verify (see clean_local_surplus_db).
            self.stderr.write('Cleaned nothing, because BACKUP_MEDIA_COPIES is missing\n')
    def clean_remote_surplus_media(self):
        """Delete surplus remote media backups per BACKUP_MEDIA_COPIES."""
        self.ensure_remote_dir_exists()
        try:
            sftp = self.get_connection()
            backups = [i.strip() for i in sftp.listdir(self.remote_dir)]
            backups = list(filter(is_media_backup, backups))
            backups.sort()
            self._write('=' * 70)
            self._write('remote media backups found: %s' % backups)
            remove_list = decide_remove(backups, settings.BACKUP_MEDIA_COPIES)
            self._write('=' * 70)
            self._write('remote media backups to clean %s' % remove_list)
            if remove_list:
                self._write('=' * 70)
                self._write('cleaning up remote media backups')
                for file_ in remove_list:
                    target_path = os.path.join(self.remote_dir, file_)
                    self._write('Removing %s' % target_path)
                    command = 'rm -rf %s' % target_path
                    try:
                        sftp.execute(command)
                    except SSHException:
                        # While testing it's too expensive to emulate a server with ssh and command line execution
                        # ability. So we fallback to sftp.rmdir when "rm -r dir" is not possible.
                        sftp.rmdir(target_path)
        except ImportError:
            # NOTE(review): ImportError assumed to signal the missing setting;
            # verify (see clean_local_surplus_db).
            self.stderr.write('Cleaned nothing, because BACKUP_MEDIA_COPIES is missing\n')
    def do_media_rsync_backup(self):
        """Incrementally back up media dirs with rsync, locally and/or remote.

        Each run creates ``dir_<timestamp>`` hard-linked (--link-dest) against
        the ``current`` symlink, marks it with the GOOD_RSYNC_FLAG file on
        success, then repoints ``current`` at the new snapshot.
        """
        # NOTE(review): self.rsyncnosymlink is set outside this view of the
        # file -- presumably by an option parsed elsewhere; confirm.
        if self.rsyncnosymlink:
            rsync_options = '-rlptgoDz'
        else:
            rsync_options = '-az --copy-dirlinks'
        # Local media rsync backup
        if not self.delete_local and not self.no_local:
            self._write('Doing local media rsync backup')
            local_current_backup = os.path.join(self.backup_dir, 'current')
            local_backup_target = os.path.join(self.backup_dir, 'dir_%s' % self.time_suffix)
            local_info = {
                'local_current_backup': local_current_backup,
                'all_directories': self.all_directories,
                'local_backup_target': local_backup_target,
                'rsync_flag': GOOD_RSYNC_FLAG,
            }
            # We used to use -az, which equals to -rlptgoD and -z.
            # Now we need more control over this, using -rptgoD instead of -a to disable symlink backup.
            local_info.update({'rsync_options': rsync_options})
            local_rsync_cmd = 'rsync %(rsync_options)s --link-dest=%(local_current_backup)s %(all_directories)s %(local_backup_target)s' % local_info
            # The flag file is only written when rsync exited successfully
            # (&&); clean_*_broken_rsync later deletes snapshots lacking it.
            local_mark_cmd = 'touch %(local_backup_target)s/%(rsync_flag)s' % local_info
            local_link_cmd = 'rm %(local_current_backup)s; ln -s %(local_backup_target)s %(local_current_backup)s' % local_info
            cmd = '\n'.join(['%s&&%s' % (local_rsync_cmd, local_mark_cmd), local_link_cmd])
            self._write(cmd)
            os.system(cmd)
        # Remote media rsync backup
        if self.ftp:
            self._write('Doing remote media rsync backup')
            self.ensure_remote_dir_exists()
            host = '%s@%s' % (self.ftp_username, self.ftp_server)
            remote_current_backup = os.path.join(self.remote_dir, 'current')
            remote_backup_target = os.path.join(self.remote_dir, 'dir_%s' % self.time_suffix)
            remote_info = {
                'remote_current_backup': remote_current_backup,
                'all_directories': self.all_directories,
                'host': host,
                'remote_backup_target': remote_backup_target,
                'rsync_flag': GOOD_RSYNC_FLAG,
            }
            remote_info.update({'rsync_options': rsync_options, })
            remote_rsync_cmd = 'rsync %(rsync_options)s --link-dest=%(remote_current_backup)s %(all_directories)s %(host)s:%(remote_backup_target)s' % remote_info
            # Mark and re-link via ssh on the remote host, mirroring the
            # local sequence above.
            remote_mark_cmd = 'ssh %(host)s "touch %(remote_backup_target)s/%(rsync_flag)s"' % remote_info
            remote_link_cmd = 'ssh %(host)s "rm %(remote_current_backup)s; ln -s %(remote_backup_target)s %(remote_current_backup)s"' % remote_info
            cmd = '\n'.join(['%s&&%s' % (remote_rsync_cmd, remote_mark_cmd), remote_link_cmd])
            self._write(cmd)
            os.system(cmd)
    def clean_broken_rsync(self):
        """Remove incomplete rsync snapshots locally and on the remote server."""
        self.clean_local_broken_rsync()
        self.clean_remote_broken_rsync()
    def clean_remote_broken_rsync(self):
        """Delete remote rsync snapshots that lack the GOOD_RSYNC_FLAG marker,
        then repoint the remote ``current`` symlink at the newest survivor."""
        self.ensure_remote_dir_exists()
        sftp = self.get_connection()
        backups = [i.strip() for i in sftp.execute('ls %s' % self.remote_dir)]
        backups = list(filter(is_media_backup, backups))
        backups.sort()
        commands = []
        for backup in backups:
            # Find the GOOD_RSYNC_FLAG file in the backup dir
            backup_path = os.path.join(self.remote_dir, backup)
            flag_file = os.path.join(backup_path, GOOD_RSYNC_FLAG)
            # "test -e flag || rm -rf dir": keep the snapshot only if the
            # completion marker exists.
            cmd = 'test -e %s||rm -rf %s' % (flag_file, backup_path)
            commands.append(cmd)
        full_cmd = '\n'.join(commands)
        self._write(full_cmd)
        sftp.execute(full_cmd)
        # after we clean the backups, recreate the "current" symlink.
        backups = [i.strip() for i in sftp.execute('ls %s' % self.remote_dir)]
        backups = list(filter(is_media_backup, backups))
        backups.sort()
        if backups:
            host = '%s@%s' % (self.ftp_username, self.ftp_server)
            current_link = os.path.join(self.remote_dir, 'current')
            # Lexicographic sort of timestamped names puts the newest last.
            latest_backup = os.path.join(self.remote_dir, backups[-1])
            remote_info = dict(current_link=current_link, latest_backup=latest_backup, host=host)
            remote_link_cmd = 'ssh %(host)s "rm %(current_link)s; ln -s %(latest_backup)s %(current_link)s"' % remote_info
            self._write(remote_link_cmd)
            os.system(remote_link_cmd)
    def clean_local_broken_rsync(self):
        """Delete local rsync snapshots that lack the GOOD_RSYNC_FLAG marker,
        then repoint the local ``current`` symlink at the newest survivor."""
        # local(web server)
        backups = os.listdir(self.backup_dir)
        backups = list(filter(is_media_backup, backups))
        backups.sort()
        commands = []
        for backup in backups:
            # Find the GOOD_RSYNC_FLAG file in the backup dir
            backup_path = os.path.join(self.backup_dir, backup)
            flag_file = os.path.join(backup_path, GOOD_RSYNC_FLAG)
            # Keep the snapshot only if the completion marker exists.
            cmd = 'test -e %s||rm -rf %s' % (flag_file, backup_path)
            commands.append(cmd)
        full_cmd = '\n'.join(commands)
        self._write(full_cmd)
        os.system(full_cmd)
        # after we clean the backups, recreate the "current" symlink.
        backups = os.listdir(self.backup_dir)
        backups = list(filter(is_media_backup, backups))
        backups.sort()
        if backups:
            current_link = os.path.join(self.backup_dir, 'current')
            # Lexicographic sort of timestamped names puts the newest last.
            latest_backup = os.path.join(self.backup_dir, backups[-1])
            info = dict(current_link=current_link, latest_backup=latest_backup)
            link_cmd = 'rm %(current_link)s; ln -s %(latest_backup)s %(current_link)s' % info
            self._write(link_cmd)
            os.system(link_cmd)
def ensure_remote_dir_exists(self):
if self.remote_dir:
sftp = self.get_connection()
try:
self._write('Creating remote dir: %s' % self.remote_dir)
sftp.makedirs(self.remote_dir, 755)
except IOError:
self.stderr.write('Failed to create remote dir: %s Attempting to continue\n' % self.remote_dir)
else:
self.stderr.write('Remote action called but no remote dir set')
| |
#!/neo/opt/bin/python
#
# Copyright (C) 2001 by Neotonic Software Corporation
# All Rights Reserved.
#
# hdfhelp.py
#
# This code makes using odb with Clearsilver as "easy as stealing candy
# from a baby". - jeske
#
# How to use:
#
# rows = tbl.fetchAllRows()
# rows.hdfExport("CGI.rows", hdf_dataset)
#
# row = tbl.fetchRow( ('primary_key', value) )
# row.hdfExport("CGI.row", hdf_dataset)
#
# How to setup:
#
# # define table
# class AgentsTable(odb.Table):
# def _defineRows(self):
# self.d_addColumn("agent_id",kInteger,None,primarykey = 1,autoincrement = 1)
# self.d_addColumn("login",kVarString,200,notnull=1)
# self.d_addColumn("ticket_count",kIncInteger,None)
#
# # make sure you return a subclass of hdfhelp.HdfRow
#
# def defaultRowClass(self):
# return hdfhelp.HdfRow
# def defaultRowListClass(self):
# return hdfhelp.HdfItemList
#
import string, os
import neo_cgi
import neo_cs
import neo_util
import odb
import time
import UserList
# Time-span constants, in seconds.
SECS_IN_MIN = 60
SECS_IN_HOUR = (SECS_IN_MIN * 60)
SECS_IN_DAY = (SECS_IN_HOUR * 24)
SECS_IN_WEEK = (SECS_IN_DAY * 7)
SECS_IN_MONTH = (SECS_IN_DAY * 30)
# Indexes into the 9-tuple returned by time.localtime()/time.gmtime().
kYearPos = 0
kMonthPos = 1
kDayPos = 2
kHourPos = 3
kMinutePos = 4
kSecondPos = 5
kWeekdayPos = 6
kJulianDayPos = 7
kDSTPos = 8
def renderDate(then_time, day=0):
    """Format a unix timestamp for display.

    Returns "" for None, 0 and -1.  Values at exactly midnight (or when
    day=1) render date-only: MM/DD in the current year, MM/DD/YYYY
    otherwise.  Anything else renders as a full "MM/DD/YYYY HH:MMam/pm".
    """
    stamp = 0 if then_time is None else int(then_time)
    if stamp == 0 or stamp == -1:
        return ""
    when = time.localtime(stamp)
    today = time.localtime(time.time())
    date_only = day or (when[kHourPos] == 0
                        and when[kMinutePos] == 0
                        and when[kSecondPos] == 0)
    if not date_only:
        # full time/date
        return time.strftime("%m/%d/%Y %I:%M%p", when)
    if when[kYearPos] == today[kYearPos]:
        # same year: omit it
        return time.strftime("%m/%d", when)
    return time.strftime("%m/%d/%Y", when)
class HdfRow(odb.Row):
    """An odb row that can export itself into an HDF dataset (Python 2 code)."""

    def hdfExport(self, prefix, hdf_dataset, *extra, **extranamed):
        """Export every column as "<prefix>.<col_name>" in *hdf_dataset*.

        Keyword options: skip_fields (iterable of names to omit),
        translate_dict (substring replacements applied to string values),
        tz (timezone name for int_date export, default "US/Pacific").
        """
        skip_fields = extranamed.get("skip_fields", None)
        translate_dict = extranamed.get("translate_dict", None)
        tz = extranamed.get("tz", "US/Pacific")
        for col_name,value in self.items():
            if skip_fields and (col_name in skip_fields):
                continue
            try:
                name,col_type,col_options = self._table.getColumnDef(col_name)
            except:
                # Column not in the table definition: treat as a plain string.
                col_type = odb.kVarString
                col_options = {}
            if (value is not None):
                if col_options.get("no_export",0): continue
                # Integers (int and long) export as plain decimal strings.
                if type(value) in [ type(0), type(0L) ]:
                    hdf_dataset.setValue(prefix + "." + col_name,"%d" % value)
                elif type(value) == type(1.0):
                    # Whole floats export without a fraction, others to 2dp.
                    if int(value) == value:
                        hdf_dataset.setValue(prefix + "." + col_name,"%d" % value)
                    else:
                        hdf_dataset.setValue(prefix + "." + col_name,"%0.2f" % value)
                else:
                    if col_type == odb.kReal:
                        log("why are we here with this value: %s" % value)
                    if translate_dict:
                        for k,v in translate_dict.items():
                            value = string.replace(value,k,v)
                    # Everything else is HTML-escaped before export.
                    hdf_dataset.setValue(prefix + "." + col_name,neo_cgi.htmlEscape(str(value)))
                    if col_options.get("int_date",0):
                        # int_date columns additionally get rendered
                        # ".string"/".day_string" forms and a tz-aware export.
                        hdf_dataset.setValue(prefix + "." + col_name + ".string",renderDate(value))
                        hdf_dataset.setValue(prefix + "." + col_name + ".day_string",renderDate(value,day=1))
                        if value: neo_cgi.exportDate(hdf_dataset, "%s.%s" % (prefix, col_name), tz, value)
                    if col_options.has_key("enum_values"):
                        # Export the human-readable enum label under ".enum".
                        enum = col_options["enum_values"]
                        hdf_dataset.setValue(prefix + "." + col_name + ".enum",
                                             str(enum.get(value,'')))
class HdfItemList(UserList.UserList):
    """A list of HdfRow objects that can export itself under numbered keys."""

    def hdfExport(self, prefix, hdf_dataset, *extra, **extranamed):
        """Export each row as "<prefix>.<n>".

        n is taken from the row's *export_by* column when that keyword is
        given; otherwise it is a running counter starting at 0.
        """
        key_column = extranamed.get("export_by", None)
        index = 0
        for row in self:
            if key_column is not None:
                index = row[key_column]
            row.hdfExport("%s.%d" % (prefix, index), hdf_dataset, *extra, **extranamed)
            index = index + 1
def setList(hdf, prefix, lst):
    """Store *lst* under *prefix*: ".0" holds the length as a string,
    ".1" .. ".N" hold the items (unchanged)."""
    hdf.setValue(prefix + ".0", str(len(lst)))
    for position, item in enumerate(lst, 1):
        hdf.setValue(prefix + ".%d" % position, item)
def getList(hdf, name):
    """Read a list back: the count comes from the integer value of *name*,
    the items from "<name>.1" .. "<name>.N" (defaulting to "").

    NOTE(review): setList writes the count to "<prefix>.0" while this reads
    it from *name* itself -- verify the two keys coincide in HDF semantics.
    """
    count = hdf.getIntValue(name, 0)
    return [hdf.getValue(name + ".%d" % (i + 1), "") for i in range(count)]
def eval_cs(hdf,a_cs_string):
    """Render the ClearSilver template string against *hdf*.

    On any template error an HTML-escaped error message is returned instead
    of raising, so callers always get a displayable string.
    """
    cs = neo_cs.CS(hdf)
    try:
        cs.parseStr(a_cs_string)
        return cs.render()
    except Exception:
        # Catch only real errors; the previous bare ``except:`` also
        # swallowed KeyboardInterrupt/SystemExit.
        return "Error in CS tags: %s" % neo_cgi.htmlEscape(repr(a_cs_string))
def childloop(hdf):
    """Return the immediate child nodes of *hdf* as a list ([] when falsy)."""
    children = []
    node = hdf.child() if hdf else None
    while node:
        children.append(node)
        node = node.next()
    return children
# ----------------------------
class HDF_Database(odb.Database):
    """odb Database whose rows and row lists are HDF-exportable by default."""

    def defaultRowClass(self):
        # Rows gain hdfExport().
        return HdfRow

    def defaultRowListClass(self):
        # Row lists gain hdfExport().
        return HdfItemList
# ----------------------------
def loopHDF(hdf, name=None):
    """Return the children of *hdf* (or of hdf.getObj(name) when *name* is
    given) as a list of nodes."""
    start = hdf.getObj(name) if name else hdf
    results = []
    if start:
        node = start.child()
        while node:
            results.append(node)
            node = node.next()
    return results
def loopKVHDF(hdf, name=None):
    """Like loopHDF, but return (name, value) tuples for each child node."""
    start = hdf.getObj(name) if name else hdf
    pairs = []
    if start:
        node = start.child()
        while node:
            pairs.append((node.name(), node.value()))
            node = node.next()
    return pairs
class hdf_iterator:
    """Iterate over the immediate children of an HDF node (py2 protocol:
    exposes next(), not __next__)."""

    def __init__(self, hdf):
        self.hdf = hdf
        self.node = self.hdf.child() if self.hdf else None

    def __iter__(self):
        return self

    def next(self):
        if not self.node:
            raise StopIteration
        current = self.node
        self.node = current.next()
        return current
class hdf_kv_iterator(hdf_iterator):
    """Iterate children as (name, value) tuples."""

    def next(self):
        if not self.node:
            raise StopIteration
        current = self.node
        self.node = current.next()
        return (current.name(), current.value())


class hdf_key_iterator(hdf_iterator):
    """Iterate children yielding only their names."""

    def next(self):
        if not self.node:
            raise StopIteration
        current = self.node
        self.node = current.next()
        return current.name()


class hdf_ko_iterator(hdf_iterator):
    """Iterate children as (name, node) tuples."""

    def next(self):
        if not self.node:
            raise StopIteration
        current = self.node
        self.node = current.next()
        return (current.name(), current)
# ----------------------------
def test():
    """Smoke test: render a one-variable ClearSilver template from HDF."""
    import neo_util
    hdf = neo_util.HDF()
    hdf.setValue("foo","1")
    # NOTE: Python 2 print statement; this module predates Python 3.
    print eval_cs(hdf,"this should say 1 ===> <?cs var:foo ?>")

if __name__ == "__main__":
    test()
| |
#!/usr/bin/python3
"""Tests for editing pages."""
#
# (C) Pywikibot team, 2015-2022
#
# Distributed under the terms of the MIT license.
#
import time
import unittest
from contextlib import suppress
import pywikibot
from pywikibot import config, page_put_queue
from pywikibot.exceptions import Error
from tests.aspects import TestCase
from tests.oauth_tests import OAuthSiteTestCase
# Set to True by the asynchronous-save callback in TestGeneralWrite.test_async.
called_back = False
class TestGeneralWrite(TestCase):

    """Run general write tests."""

    family = 'wikipedia'
    code = 'test'

    login = True
    write = True

    def test_createonly(self):
        """Test save with createonly enforced."""
        # Timestamped title makes the page new on every run, so createonly
        # cannot collide with an existing page.
        ts = str(time.time())
        p = pywikibot.Page(self.site, 'User:John Vandenberg/createonly/' + ts)
        p.save(createonly=True)

    def test_async(self):
        """Test writing to a page."""
        global called_back

        def callback(page, err):
            # Runs after the queued save completes; record success in the
            # module-level flag so the test body can assert on it.
            global called_back
            self.assertEqual(page, p)
            self.assertIsNone(err)
            called_back = True
            self.assertTrue(page_put_queue.empty())

        called_back = False
        ts = str(time.time())
        p = pywikibot.Page(self.site, 'User:John Vandenberg/async test write')
        p.text = ts
        p.save(asynchronous=True, callback=callback)
        # Block until the async put queue has flushed, then re-fetch.
        page_put_queue.join()
        p = pywikibot.Page(self.site, 'User:John Vandenberg/async test write')
        self.assertEqual(p.text, ts)
        self.assertTrue(called_back)

    def test_appendtext(self):
        """Test writing to a page without preloading the .text."""
        ts = str(time.time())
        p = pywikibot.Page(self.site, 'User:John Vandenberg/appendtext test')
        # editpage(appendtext=...) must not populate the lazy _text cache.
        self.assertFalse(hasattr(p, '_text'))
        p.site.editpage(p, appendtext=ts)
        self.assertFalse(hasattr(p, '_text'))
        p = pywikibot.Page(self.site, 'User:John Vandenberg/appendtext test')
        self.assertTrue(p.text.endswith(ts))
        # The text was appended, not replaced.
        self.assertNotEqual(p.text, ts)
class TestSiteMergeHistory(TestCase):

    """Test history merge action."""

    family = 'wikipedia'
    code = 'test'

    write = True
    rights = 'mergehistory'

    def setup_test_pages(self):
        """Helper function to set up pages that we will use in these tests.

        Creates a source page with two revisions and a destination page with
        one, returning the timestamps of the source revisions.
        """
        site = self.get_site()
        source = pywikibot.Page(site, 'User:Sn1per/MergeTest1')
        dest = pywikibot.Page(site, 'User:Sn1per/MergeTest2')

        # Make sure the wiki supports action=mergehistory
        if site.mw_version < '1.27.0-wmf.13':
            self.skipTest('Wiki version must be 1.27.0-wmf.13 or newer to '
                          'support the history merge API.')

        if source.exists():
            source.delete('Pywikibot merge history unit test')
        if dest.exists():
            dest.delete('Pywikibot merge history unit test')

        source.text = 'Lorem ipsum dolor sit amet'
        source.save()
        first_rev = source.editTime()

        source.text = 'Lorem ipsum dolor sit amet is a common test phrase'
        source.save()
        second_rev = source.editTime()

        dest.text = 'Merge history page unit test destination'
        dest.save()

        return first_rev, second_rev

    def test_merge_history_validation(self):
        """Test Site.merge_history validity checks."""
        site = self.get_site()

        page_source = pywikibot.Page(site, 'User:Sn1per/MergeTest1')
        page_nonexist = pywikibot.Page(site, 'User:Sn1per/Nonexistent')

        # Test source and dest validation
        test_errors = [
            (
                {  # source same as dest
                    'source': page_source,
                    'dest': page_source,
                },
                'Cannot merge revisions of [[test:User:Sn1per/MergeTest1]] '
                'to itself'
            ),
            (
                {  # nonexistent source
                    'source': page_nonexist,
                    'dest': page_source,
                },
                'Cannot merge revisions from source '
                '[[test:User:Sn1per/Nonexistent]] because it does not exist '
                'on test:test'
            ),
            (
                {  # nonexistent dest
                    'source': page_source,
                    'dest': page_nonexist,
                },
                'Cannot merge revisions to destination '
                '[[test:User:Sn1per/Nonexistent]] because it does not exist '
                'on test:test'
            ),
        ]

        self.setup_test_pages()
        for params, error_msg in test_errors:
            try:
                site.merge_history(**params)
            except Error as err:
                self.assertEqual(str(err), error_msg)
            else:
                # BUG FIX: previously the loop passed silently when
                # merge_history did NOT raise, so an invalid merge that
                # succeeded went undetected.
                self.fail('merge_history did not raise Error for {!r}'
                          .format(params))

    def test_merge_history(self):
        """Test Site.merge_history functionality."""
        site = self.get_site()
        source = pywikibot.Page(site, 'User:Sn1per/MergeTest1')
        dest = pywikibot.Page(site, 'User:Sn1per/MergeTest2')

        # Without timestamp: both source revisions move to dest.
        self.setup_test_pages()
        site.merge_history(source, dest)
        self.assertEqual(dest.revision_count(), 3)

        # With latest timestamp: still merges both revisions.
        revs = self.setup_test_pages()
        source.clear_cache()  # clear revision cache when page is recreated
        dest.clear_cache()
        site.merge_history(source, dest, revs[1])
        self.assertEqual(dest.revision_count(), 3)

        # With middle timestamp: only the first revision merges.
        revs = self.setup_test_pages()
        source.clear_cache()
        dest.clear_cache()
        site.merge_history(source, dest, revs[0])
        self.assertEqual(dest.revision_count(), 2)
class OAuthEditTest(OAuthSiteTestCase):

    """Run edit test with OAuth enabled."""

    family = 'wikipedia'
    code = 'test'

    write = True

    def setUp(self):
        """Set up test by checking site and initialization."""
        super().setUp()
        # Save the global auth config so tearDown can restore it, then
        # register this site's OAuth tokens for the duration of the test.
        self._authenticate = config.authenticate
        oauth_tokens = self.consumer_token + self.access_token
        config.authenticate[self.site.hostname()] = oauth_tokens

    def tearDown(self):
        """Tear down test by resetting config.authenticate."""
        super().tearDown()
        config.authenticate = self._authenticate

    def test_edit(self):
        """Test editing to a page."""
        self.site.login()
        self.assertTrue(self.site.logged_in())
        ts = str(time.time())
        p = pywikibot.Page(self.site,
                           'User:{}/edit test'.format(self.site.username()))
        p.site.editpage(p, appendtext=ts)
        revision_id = p.latest_revision_id
        # Re-fetch and confirm the edit landed as the latest revision.
        p = pywikibot.Page(self.site,
                           'User:{}/edit test'.format(self.site.username()))
        self.assertEqual(revision_id, p.latest_revision_id)
        self.assertTrue(p.text.endswith(ts))
# Allow running this test module directly; unittest.main() raises SystemExit,
# which suppress() swallows.
if __name__ == '__main__':  # pragma: no cover
    with suppress(SystemExit):
        unittest.main()
| |
# -*- coding: utf-8 -*-
import os
import io
from collections import OrderedDict
import pytest
from boussole.exceptions import SettingsDiscoveryError
from boussole.conf.discovery import Discover
from boussole.conf.json_backend import SettingsBackendJson
from boussole.conf.yaml_backend import SettingsBackendYaml
class DummyTestBackend:
    """
    Dummy backend object just with object attributes required for
    discovering.
    """
    # Default settings filename Discover searches for in a base directory.
    _default_filename = "boussole.dum"
    # Engine identifier used as the registry key.
    _kind_name = "dummy"
    # File extension mapped to this engine.
    _file_extension = "dum"
# BUG FIX (naming only, behavior unchanged): the last two parameters were
# swapped relative to the data they carry -- the third tuple element holds
# (default_filename, kind) pairs and the fourth (file_extension, kind) pairs,
# but they were named expected_exts / expected_filenames respectively.
@pytest.mark.parametrize("backends,expected_engines,expected_filenames,expected_exts", [
    (
        [],
        (),
        (),
        (),
    ),
    (
        [DummyTestBackend],
        (
            (DummyTestBackend._kind_name, DummyTestBackend),
        ),
        (
            (DummyTestBackend._default_filename, DummyTestBackend._kind_name),
        ),
        (
            (DummyTestBackend._file_extension, DummyTestBackend._kind_name),
        ),
    ),
    (
        [SettingsBackendJson, SettingsBackendYaml, DummyTestBackend],
        (
            (SettingsBackendJson._kind_name, SettingsBackendJson),
            (SettingsBackendYaml._kind_name, SettingsBackendYaml),
            (DummyTestBackend._kind_name, DummyTestBackend),
        ),
        (
            (SettingsBackendJson._default_filename, SettingsBackendJson._kind_name),
            (SettingsBackendYaml._default_filename, SettingsBackendYaml._kind_name),
            (DummyTestBackend._default_filename, DummyTestBackend._kind_name),
        ),
        (
            (SettingsBackendJson._file_extension, SettingsBackendJson._kind_name),
            (SettingsBackendYaml._file_extension, SettingsBackendYaml._kind_name),
            (DummyTestBackend._file_extension, DummyTestBackend._kind_name),
        ),
    ),
])
def test_discover_scan_backends(backends, expected_engines, expected_filenames,
                                expected_exts):
    """
    Discover init engines from default backends
    """
    disco = Discover()

    engines, filenames, extensions = disco.scan_backends(backends)

    assert engines == OrderedDict(expected_engines)
    assert filenames == OrderedDict(expected_filenames)
    assert extensions == OrderedDict(expected_exts)
@pytest.mark.parametrize("filepath,kind,name", [
    ("foo.json", None, "json"),
    ("foo.yml", None, "yaml"),
    ("/home/foo.json", None, "json"),
    ("/home/json/foo.yml", None, "yaml"),
    (".foo", "json", "json"),
    (".foo", "yaml", "yaml"),
    ("/home/bar/.foo", "yaml", "yaml"),
    ("foo.bar", "yaml", "yaml"),
    ("foo.json", "yaml", "yaml"),
])
def test_get_backend_success(filepath, kind, name):
    """
    Resolve the backend engine for *filepath* (or the forced *kind*) and
    check its kind name.
    """
    discoverer = Discover([SettingsBackendJson, SettingsBackendYaml])

    engine = discoverer.get_engine(filepath, kind=kind)

    assert engine._kind_name == name
@pytest.mark.parametrize("filepath,kind", [
    (".foo", None),
    ("foo.bar", None),
    ("foo.json.bar", None),
    ("/home/bar/.foo", None),
    ("foo.json", "wrong"),
])
def test_get_backend_fail(filepath, kind):
    """
    Backend discovery raises SettingsDiscoveryError for unknown extensions
    or an unknown forced kind.
    """
    discoverer = Discover([SettingsBackendJson, SettingsBackendYaml])

    with pytest.raises(SettingsDiscoveryError):
        discoverer.get_engine(filepath, kind=kind)
def test_search_empty():
    """
    search() raises SettingsDiscoveryError when neither basedir nor
    filepath is given.
    """
    discoverer = Discover()

    with pytest.raises(SettingsDiscoveryError):
        discoverer.search()
@pytest.mark.parametrize("datas", [
    # Absolute filepath to a json file
    ({
        "id": "absolute_filepath_json",
        "filepath": "BASEDIR_PREPEND/foo.json",
        "basedir": None,
        "kind": None,
        "fake_filename": "foo.json",
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_engine": "json"
    }),
    # Absolute filepath to a yaml file
    ({
        "id": "absolute_filepath_yaml",
        "filepath": "BASEDIR_PREPEND/foo.yml",
        "basedir": None,
        "kind": None,
        "fake_filename": "foo.yml",
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_engine": "yaml"
    }),
    # Absolute filepath to a json file and forced to json (although it's not
    # useful)
    ({
        "id": "absolute_filepath_json_forced_to_json",
        "filepath": "BASEDIR_PREPEND/foo.json",
        "basedir": None,
        "kind": "json",
        "fake_filename": "foo.json",
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_engine": "json"
    }),
    # Absolute filepath to a json file but forced to yaml
    ({
        "id": "absolute_filepath_json_forced_to_yaml",
        "filepath": "BASEDIR_PREPEND/foo.json",
        "basedir": None,
        "kind": "yaml",
        "fake_filename": "foo.json",
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_engine": "yaml"
    }),
    # Relative filepath to a json file and with basedir given
    ({
        "id": "relative_filepath_json_with_basedir",
        "filepath": "foo.json",
        "basedir": "BASEDIR_PREPEND",
        "kind": "json",
        "fake_filename": "foo.json",
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_engine": "json"
    }),
    # Given only basedir which contain a json file
    ({
        "id": "no_filepath_with_basedir_json",
        "filepath": None,
        "basedir": "BASEDIR_PREPEND",
        "kind": None,
        "fake_filename": SettingsBackendJson._default_filename,
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_engine": "json"
    }),
    # Given only basedir which contain a yaml file
    ({
        "id": "no_filepath_with_basedir_yaml",
        "filepath": None,
        "basedir": "BASEDIR_PREPEND",
        "kind": None,
        "fake_filename": SettingsBackendYaml._default_filename,
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_engine": "yaml"
    }),
])
def test_search_success(temp_builds_dir, datas):
    """
    Test discover search which should succeed.

    The ``BASEDIR_PREPEND`` token in parameter values stands for the test's
    temporary base directory, which cannot be known at parametrize time.
    """
    tmp_dirname = "discovery_search_{}".format(datas["id"])
    test_basedir = temp_builds_dir.join(tmp_dirname).strpath
    os.makedirs(test_basedir)
    # Resolve paths into local variables instead of mutating the shared
    # parametrize dict: the same ``datas`` object is reused on re-runs
    # (e.g. pytest-repeat, --lf), and in-place substitution would leak a
    # stale temporary path into the next run.
    filepath = datas["filepath"]
    if filepath and filepath.startswith("BASEDIR_PREPEND"):
        filepath = filepath.replace("BASEDIR_PREPEND", test_basedir)
    basedir = datas["basedir"]
    if basedir and basedir.startswith("BASEDIR_PREPEND"):
        basedir = basedir.replace("BASEDIR_PREPEND", test_basedir)
    # Create a dummy settings file in temp base directory
    if datas["fake_filename"]:
        settings_filepath = os.path.join(test_basedir, datas["fake_filename"])
        with io.open(settings_filepath, "w", encoding="utf-8") as f:
            f.write("Dummy")
    # Makes search
    disco = Discover(backends=datas["backends"])
    discovered_filepath, discovered_engine = disco.search(
        filepath=filepath,
        basedir=basedir,
        kind=datas["kind"]
    )
    assert datas["expected_engine"] == discovered_engine._kind_name
@pytest.mark.parametrize("datas", [
    # Given only basedir which contain a json file but only yaml engine
    # is available
    ({
        "id": "no_filepath_with_basedir_nonavailable_engine",
        "filepath": None,
        "basedir": "BASEDIR_PREPEND",
        "kind": None,
        "fake_filename": SettingsBackendJson._default_filename,
        "backends": [SettingsBackendYaml],
        "expected_exception": SettingsDiscoveryError
    }),
    # Relative filepath to a json file: a relative filepath with an empty
    # basedir cant work within tests since fake file is in a temporary
    # directory
    ({
        "id": "relative_filepath_json",
        "filepath": "foo.json",
        "basedir": None,
        "kind": None,
        "fake_filename": "foo.json",
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_exception": SettingsDiscoveryError
    }),
    # Given basedir which contain a file with json extension but backend
    # explicitely specify yaml backend: giving an explicit backend name must
    # match the implicit filename.
    ({
        "id": "with_basedir_and_backend",
        "filepath": None,
        "basedir": "BASEDIR_PREPEND",
        "kind": "yaml",
        "fake_filename": "foo.json",
        "backends": [SettingsBackendJson, SettingsBackendYaml],
        "expected_exception": SettingsDiscoveryError
    }),
])
def test_search_fail(temp_builds_dir, datas):
    """
    Test discover search which should fail.

    The ``BASEDIR_PREPEND`` token in parameter values stands for the test's
    temporary base directory, which cannot be known at parametrize time.
    """
    tmp_dirname = "discovery_search_{}".format(datas["id"])
    test_basedir = temp_builds_dir.join(tmp_dirname).strpath
    os.makedirs(test_basedir)
    # Resolve paths into local variables instead of mutating the shared
    # parametrize dict, so re-runs of the same parameter set do not see a
    # stale temporary path from a previous run.
    filepath = datas["filepath"]
    if filepath and filepath.startswith("BASEDIR_PREPEND"):
        filepath = filepath.replace("BASEDIR_PREPEND", test_basedir)
    basedir = datas["basedir"]
    if basedir and basedir.startswith("BASEDIR_PREPEND"):
        basedir = basedir.replace("BASEDIR_PREPEND", test_basedir)
    # Create a dummy settings file in temp base directory
    if datas["fake_filename"]:
        settings_filepath = os.path.join(test_basedir, datas["fake_filename"])
        with io.open(settings_filepath, "w", encoding="utf-8") as f:
            f.write("Dummy")
    # Makes search
    disco = Discover(backends=datas["backends"])
    with pytest.raises(datas["expected_exception"]):
        disco.search(
            filepath=filepath,
            basedir=basedir,
            kind=datas["kind"]
        )
| |
import pytest
from dash import Dash, Input, Output, State, html
from dash.exceptions import InvalidCallbackReturnValue, IncorrectTypeException
def test_cbva001_callback_dep_types():
    """Callback dependencies must be declared Outputs first, then Inputs,
    then States; misordering raises IncorrectTypeException at registration
    time, while bare args, tuples and lists are all accepted forms."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("child", id="in1"),
            html.Div("state", id="state1"),
            html.Div(id="out1"),
            html.Div("child", id="in2"),
            html.Div("state", id="state2"),
            html.Div(id="out2"),
            html.Div("child", id="in3"),
            html.Div("state", id="state3"),
            html.Div(id="out3"),
        ]
    )
    with pytest.raises(IncorrectTypeException) as err:
        # Input listed before Output: the decorator itself must raise, so
        # the pytest.fail below is only reached if no exception occurred.
        @app.callback(Input("in1", "children"), Output("out1", "children"))
        def f2(i):
            return i
        pytest.fail("out-of-order args")
    # The error message names the expected ordering and the offending deps.
    assert "Outputs first,\nthen all Inputs, then all States." in err.value.args[0]
    assert "<Input `in1.children`>" in err.value.args[0]
    assert "<Output `out1.children`>" in err.value.args[0]
    # all OK with tuples
    @app.callback(
        (Output("out1", "children"),),
        (Input("in1", "children"),),
        (State("state1", "children"),),
    )
    def f1(i):
        return i
    # all OK with bare (unwrapped) dependency args
    @app.callback(
        Output("out2", "children"),
        Input("in2", "children"),
        State("state2", "children"),
    )
    def f3(i):
        return i
    # all OK with lists
    @app.callback(
        [Output("out3", "children")],
        [Input("in3", "children")],
        [State("state3", "children")],
    )
    def f4(i):
        return i
def test_cbva002_callback_return_validation():
    """Callback return values must be serializable and match the number of
    declared Outputs; violations raise InvalidCallbackReturnValue when the
    callback is invoked (as the dispatcher would invoke it)."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div(id="a"),
            html.Div(id="b"),
            html.Div(id="c"),
            html.Div(id="d"),
            html.Div(id="e"),
            html.Div(id="f"),
        ]
    )
    @app.callback(Output("b", "children"), [Input("a", "children")])
    def single(a):
        # A set is not a serializable return value.
        return set([1])
    with pytest.raises(InvalidCallbackReturnValue):
        # outputs_list (normally callback_context.outputs_list) is provided
        # by the dispatcher from the request.
        single("aaa", outputs_list={"id": "b", "property": "children"})
        pytest.fail("not serializable")
    @app.callback(
        [Output("c", "children"), Output("d", "children")], [Input("a", "children")]
    )
    def multi(a):
        # Right length, but the second element is a non-serializable set.
        return [1, set([2])]
    with pytest.raises(InvalidCallbackReturnValue):
        outputs_list = [
            {"id": "c", "property": "children"},
            {"id": "d", "property": "children"},
        ]
        multi("aaa", outputs_list=outputs_list)
        pytest.fail("nested non-serializable")
    @app.callback(
        [Output("e", "children"), Output("f", "children")], [Input("a", "children")]
    )
    def multi2(a):
        # One element returned where two declared Outputs expect two.
        return ["abc"]
    with pytest.raises(InvalidCallbackReturnValue):
        outputs_list = [
            {"id": "e", "property": "children"},
            {"id": "f", "property": "children"},
        ]
        multi2("aaa", outputs_list=outputs_list)
        pytest.fail("wrong-length list")
def test_cbva003_list_single_output(dash_duo):
    """
    A single Output wrapped in a list behaves like a bare Output, with the
    callback returning a one-element tuple instead of a bare value.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Div("Hi", id="in"), html.Div(id="out1"), html.Div(id="out2")]
    )

    @app.callback(Output("out1", "children"), Input("in", "children"))
    def update_bare(content):
        return "1: " + content

    @app.callback([Output("out2", "children")], [Input("in", "children")])
    def update_listed(content):
        return ("2: " + content,)

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out1", "1: Hi")
    dash_duo.wait_for_text_to_equal("#out2", "2: Hi")
@pytest.mark.parametrize("named_out", [True, False])
@pytest.mark.parametrize("named_in,named_state", [(True, True), (False, False)])
def test_cbva004_named_args(named_out, named_in, named_state, dash_duo):
    """
    Callback dependencies may be given positionally or through the
    output/inputs/state keyword arguments, in any mix, either bare or
    wrapped in lists.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("Hi", id="in"),
            html.Div("gh", id="state"),
            html.Div(id="out1"),
            html.Div(id="out2"),
        ]
    )

    def make_args(*specs):
        # Split dependency specs into positional and keyword arguments
        # according to the parametrized naming flags.
        positional = []
        keywords = {}
        labels = ("output", "inputs", "state")
        flags = (named_out, named_in, named_state)
        for spec, label, as_keyword in zip(specs, labels, flags):
            if as_keyword:
                keywords[label] = spec
            else:
                positional.append(spec)
        return positional, keywords

    args, kwargs = make_args(
        Output("out1", "children"), Input("in", "children"), State("state", "children")
    )

    @app.callback(*args, **kwargs)
    def update_bare(i, s):
        return "1: " + i + s

    args, kwargs = make_args(
        [Output("out2", "children")],
        [Input("in", "children")],
        [State("state", "children")],
    )

    @app.callback(*args, **kwargs)
    def update_listed(i, s):
        return ("2: " + i + s,)

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out1", "1: High")
    dash_duo.wait_for_text_to_equal("#out2", "2: High")
def test_cbva005_tuple_args(dash_duo):
    """
    Tuples are accepted wherever lists of dependencies are: grouped
    Inputs, a single-Output tuple, and a State tuple.
    """
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div("Yo", id="in1"),
            html.Div("lo", id="in2"),
            html.Div(id="out1"),
            html.Div(id="out2"),
        ]
    )

    @app.callback(
        Output("out1", "children"), (Input("in1", "children"), Input("in2", "children"))
    )
    def combine_inputs(first, second):
        return "1: " + first + second

    @app.callback(
        (Output("out2", "children"),),
        Input("in1", "children"),
        (State("in2", "children"),),
    )
    def combine_with_state(first, second):
        return ("2: " + first + second,)

    dash_duo.start_server(app)
    dash_duo.wait_for_text_to_equal("#out1", "1: Yolo")
    dash_duo.wait_for_text_to_equal("#out2", "2: Yolo")
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(
[functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)] +
shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(
shape[:ndims - 1] +
[functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)])
def _NumpyScatterNd(ref, indices, updates, op):
  """Reference scatter_nd: fold each update row into ref with `op`.

  The last axis of `indices` addresses the leading axes of `ref`; the
  remaining trailing axes of `ref` form the per-update slice.
  """
  ixdim = indices.shape[-1]
  num_updates = indices.size // ixdim
  slice_size = 1
  for axis in range(ixdim, len(ref.shape)):
    slice_size *= ref.shape[axis]
  # Work on 2-D views: one row of coordinates and one row of update
  # values per scatter operation.
  flat_indices = _FlatInnerDims(indices)
  flat_updates = updates.reshape((num_updates, slice_size))
  output_flat = _FlatOuterDims(ref, ixdim + 1)
  for row, coord in enumerate(flat_indices):
    key = tuple(coord)
    output_flat[key] = op(output_flat[key], flat_updates[row])
  return output_flat.reshape(ref.shape)
def _NumpyUpdate(indices, updates, shape):
  """Scatter `updates` into a fresh zero tensor of `shape` (last write wins)."""
  zeros = np.zeros(shape, dtype=updates.dtype)
  return _NumpyScatterNd(zeros, indices, updates, lambda old, new: new)
class ScatterNdTest(xla_test.XLATestCase):
  """Tests array_ops.scatter_nd compiled through XLA against numpy."""
  def _VariableRankTest(self,
                        np_scatter,
                        tf_scatter,
                        vtype,
                        itype,
                        repeat_indices=False):
    """Compare tf_scatter with np_scatter across several ref/index ranks.

    Args:
      np_scatter: numpy reference, called as (indices, updates, shape).
      tf_scatter: implementation under test, same call convention.
      vtype: numpy dtype for the update values.
      itype: numpy dtype for the indices.
      repeat_indices: when True, half of the indices are duplicates.
    """
    np.random.seed(8)
    ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
    indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
    for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
      num_updates = indices_shape[0]
      ixdim = indices_shape[-1]
      # The leading ixdim axes of ref are addressable by an index row.
      indexable_area_shape = ()
      for i in range(ixdim):
        indexable_area_shape += (ref_shape[i],)
      # Enumerate every valid coordinate, then sample num_updates of them.
      all_indices = [
          list(coord)
          for coord, _ in np.ndenumerate(np.empty(indexable_area_shape, vtype))
      ]
      np.random.shuffle(all_indices)
      indices = np.array(all_indices[:num_updates])
      if num_updates > 1 and repeat_indices:
        # Replace the second half of the indices with duplicates drawn
        # from the first half.
        indices = indices[:num_updates // 2]
        for _ in range(num_updates - num_updates // 2):
          indices = np.append(
              indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
        np.random.shuffle(indices)
      indices = _AsType(indices[:num_updates], itype)
      # Each update carries the trailing (non-indexed) axes of ref.
      updates_shape = (num_updates,)
      for i in range(ixdim, len(ref_shape)):
        updates_shape += (ref_shape[i],)
      updates = _AsType(np.random.randn(*(updates_shape)), vtype)
      # Scatter via numpy
      np_out = np_scatter(indices, updates, ref_shape)
      # Scatter via tensorflow
      tf_out = tf_scatter(indices, updates, ref_shape)
      self.assertAllClose(np_out, tf_out)
  def _VariableRankTests(self, np_scatter, tf_scatter):
    """Run _VariableRankTest for every supported value/index dtype pair."""
    for vtype in self.numeric_types:
      for itype in set([np.int32, np.int64]).intersection(set(self.int_types)):
        self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)
  def _runScatterNd(self, indices, updates, shape):
    """Evaluate array_ops.scatter_nd in the XLA test scope and return it."""
    with self.session():
      updates_placeholder = array_ops.placeholder(updates.dtype)
      indices_placeholder = array_ops.placeholder(indices.dtype)
      with self.test_scope():
        output = array_ops.scatter_nd(indices_placeholder, updates_placeholder,
                                      shape)
      feed_dict = {updates_placeholder: updates, indices_placeholder: indices}
      return output.eval(feed_dict=feed_dict)
  def testSimple(self):
    """Scalar updates scattered into a rank-1 tensor."""
    indices = np.array([[4], [3], [1], [7]], dtype=np.int32)
    updates = np.array([9, 10, 11, 12], dtype=np.float32)
    # NOTE(review): expected is int32 while updates are float32;
    # assertAllEqual compares values, so this passes — confirm intent.
    expected = np.array([0, 11, 0, 10, 9, 0, 0, 12], dtype=np.int32)
    self.assertAllEqual(expected, self._runScatterNd(indices, updates, [8]))
  def testRepeatedIndices(self):
    """Duplicate indices accumulate additively (9+11 and 10+12)."""
    indices = np.array([[0], [1], [0], [1]], dtype=np.int32)
    updates = np.array([9, 10, 11, 12], dtype=np.float32)
    expected = np.array([20, 22], dtype=np.int32)
    self.assertAllEqual(expected, self._runScatterNd(indices, updates, [2]))
  def testSimple2(self):
    """Full-rank (2-component) indices addressing single elements."""
    indices = np.array([[1, 0], [1, 1]], dtype=np.int32)
    updates = np.array([11., 12.], dtype=np.float32)
    expected = np.array([[0., 0.], [11., 12.], [0., 0.]], dtype=np.float32)
    self.assertAllEqual(expected, self._runScatterNd(indices, updates, [3, 2]))
  def testSimple3(self):
    """One-component index scattering a whole row slice."""
    indices = np.array([[1]], dtype=np.int32)
    updates = np.array([[11., 12.]], dtype=np.float32)
    expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
    self.assertAllEqual(expected, self._runScatterNd(indices, updates, [3, 2]))
  def testVariableRankUpdate(self):
    """Cross-check scatter_nd against the _NumpyUpdate reference."""
    self._VariableRankTests(_NumpyUpdate, self._runScatterNd)
  def testExtraIndicesDimensions(self):
    """Indices with extra leading batch dimensions are accepted."""
    indices = np.zeros([1, 1, 2], np.int32)
    updates = np.zeros([1, 1], np.int32)
    expected = np.zeros([2, 2], dtype=np.int32)
    self.assertAllEqual(expected, self._runScatterNd(indices, updates, [2, 2]))
  @test_util.disable_mlir_bridge("Error messages differ")
  def testRank3InvalidShape1(self):
    """Mismatched leading dims between indices and updates must error."""
    indices = np.zeros([3, 2, 2], np.int32)
    updates = np.zeros([2, 2, 2], np.int32)
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                             "Must have updates.shape"):
      self._runScatterNd(indices, updates, [2, 2, 2])
  @test_util.disable_mlir_bridge("Error messages differ")
  def testRank3InvalidShape2(self):
    """Updates missing the slice dimension must error."""
    indices = np.zeros([2, 2, 1], np.int32)
    updates = np.zeros([2, 2], np.int32)
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                             "Must have updates.shape"):
      self._runScatterNd(indices, updates, [2, 2, 2])
  def testScatterOutOfRange(self):
    """Out-of-range indices must not fail (implementation-defined output)."""
    updates = np.array([-3, -4, -5]).astype(np.float32)
    # Indices all in range, no problem.
    indices = np.array([[2], [0], [5]], dtype=np.int32)
    self._runScatterNd(indices, updates, [6])
    # Indices out of range should not fail. It produces implementation-defined
    # output.
    indices = np.array([[-1], [0], [5]], dtype=np.int32)
    self._runScatterNd(indices, updates, [6])
    indices = np.array([[2], [0], [6]], dtype=np.int32)
    self._runScatterNd(indices, updates, [6])
class ScatterNdTensorTest(xla_test.XLATestCase):
  """Tests tensor_scatter_{add,sub,update} against a ones([8]) tensor."""
  def _runScatter(self, op):
    """Apply `op` to a ones([8]) tensor with fixed indices and updates."""
    indices_np = np.array([[4], [3], [1], [7]], dtype=np.int32)
    updates_np = np.array([9, 10, 11, 12], dtype=np.float32)
    with self.session() as sess, self.test_scope():
      indices = array_ops.placeholder(indices_np.dtype, shape=indices_np.shape)
      updates = array_ops.placeholder(updates_np.dtype, shape=updates_np.shape)
      t = array_ops.ones([8], dtype=np.float32)
      out = op(t, indices, updates)
      return sess.run(out, feed_dict={indices: indices_np, updates: updates_np})
  def testAdd(self):
    """Updates are added to the existing ones."""
    self.assertAllEqual(
        self._runScatter(array_ops.tensor_scatter_add),
        np.array([1, 12, 1, 11, 10, 1, 1, 13], dtype=np.float32))
  def testSub(self):
    """Updates are subtracted from the existing ones."""
    self.assertAllEqual(
        self._runScatter(array_ops.tensor_scatter_sub),
        np.array([1, -10, 1, -9, -8, 1, 1, -11], dtype=np.float32))
  def testUpdate(self):
    """Updates replace the existing ones at the indexed positions."""
    self.assertAllEqual(
        self._runScatter(array_ops.tensor_scatter_update),
        np.array([1, 11, 1, 10, 9, 1, 1, 12], dtype=np.float32))
# Run the suite through the TensorFlow test runner when executed directly.
if __name__ == "__main__":
  test.main()
| |
import asyncio
import collections
import logging
from itertools import chain
import kafka.common as Errors
from kafka.common import TopicPartition
from kafka.protocol.fetch import FetchRequest
from kafka.protocol.message import PartialMessage
from kafka.protocol.offset import OffsetRequest, OffsetResetStrategy
from aiokafka import ensure_future
# Module-level logger shared by the fetch machinery below.
log = logging.getLogger(__name__)
# One deserialized message as handed back to the consumer.
ConsumerRecord = collections.namedtuple(
    "ConsumerRecord", ["topic", "partition", "offset", "key", "value"])
class NoOffsetForPartitionError(Errors.KafkaError):
    """Raised when a partition has no stored offset and no reset policy
    is available (see update_fetch_positions)."""
    pass
class RecordTooLargeError(Errors.KafkaError):
    """Raised when a message is larger than the configured fetch size and
    therefore can never be returned to the consumer."""
    pass
class FetchResult:
    """Prefetched messages for one topic partition, consumed in order."""

    def __init__(self, tp, *, subscriptions, loop, messages, backoff):
        self._topic_partition = tp
        self._subscriptions = subscriptions
        self._messages = messages
        self._created = loop.time()
        self._backoff = backoff
        self._loop = loop

    def calculate_backoff(self):
        """Seconds left before this partition may be fetched again; 0 if none."""
        remaining = self._backoff - (self._loop.time() - self._created)
        return remaining if remaining > 0 else 0

    def _check_assignment(self, tp):
        """Drop the buffer and return False if `tp` is no longer fetchable."""
        subscriptions = self._subscriptions
        if subscriptions.needs_partition_assignment or \
                not subscriptions.is_fetchable(tp):
            # this can happen when a rebalance happened before
            # fetched records are returned
            log.debug("Not returning fetched records for partition %s"
                      " since it is no fetchable (unassigned or paused)", tp)
            self._messages.clear()
            return False
        return True

    def getone(self):
        """Return the next message at the current position, or None."""
        tp = self._topic_partition
        if not self._check_assignment(tp):
            return None
        while self._messages:
            msg = self._messages.popleft()
            if msg.offset == self._subscriptions.assignment[tp].position:
                # Compressed messagesets may include earlier messages
                # It is also possible that the user called seek()
                self._subscriptions.assignment[tp].position += 1
                return msg
        return None

    def getall(self):
        """Return every remaining message matching the advancing position."""
        tp = self._topic_partition
        if not self._check_assignment(tp):
            return []
        batch = []
        while self._messages:
            msg = self._messages.popleft()
            if msg.offset == self._subscriptions.assignment[tp].position:
                # Compressed messagesets may include earlier messages
                # It is also possible that the user called seek()
                self._subscriptions.assignment[tp].position += 1
                batch.append(msg)
        return batch
class FetchError:
    """Holds a fetch error for a partition until the consumer observes it."""

    def __init__(self, *, loop, error, backoff):
        self._error = error
        self._created = loop.time()
        self._backoff = backoff
        self._loop = loop

    def calculate_backoff(self):
        """Seconds left before this partition may be fetched again; 0 if none."""
        remaining = self._backoff - (self._loop.time() - self._created)
        return remaining if remaining > 0 else 0

    def check_raise(self):
        """Raise the stored error to the consumer."""
        # TODO: Do we need to raise error if partition not assigned anymore
        raise self._error
class Fetcher:
    def __init__(self, client, subscriptions, *, loop,
                 key_deserializer=None,
                 value_deserializer=None,
                 fetch_min_bytes=1,
                 fetch_max_wait_ms=500,
                 max_partition_fetch_bytes=1048576,
                 check_crcs=True,
                 fetcher_timeout=0.1,
                 prefetch_backoff=0.1):
        """Initialize a Kafka Message Fetcher.
        Parameters:
            client (AIOKafkaClient): kafka client
            subscription (SubscriptionState): instance of SubscriptionState
                located in kafka.consumer.subscription_state
            key_deserializer (callable): Any callable that takes a
                raw message key and returns a deserialized key.
            value_deserializer (callable, optional): Any callable that takes a
                raw message value and returns a deserialized value.
            fetch_min_bytes (int): Minimum amount of data the server should
                return for a fetch request, otherwise wait up to
                fetch_max_wait_ms for more data to accumulate. Default: 1.
            fetch_max_wait_ms (int): The maximum amount of time in milliseconds
                the server will block before answering the fetch request if
                there isn't sufficient data to immediately satisfy the
                requirement given by fetch_min_bytes. Default: 500.
            max_partition_fetch_bytes (int): The maximum amount of data
                per-partition the server will return. The maximum total memory
                used for a request = #partitions * max_partition_fetch_bytes.
                This size must be at least as large as the maximum message size
                the server allows or else it is possible for the producer to
                send messages larger than the consumer can fetch. If that
                happens, the consumer can get stuck trying to fetch a large
                message on a certain partition. Default: 1048576.
            check_crcs (bool): Automatically check the CRC32 of the records
                consumed. This ensures no on-the-wire or on-disk corruption to
                the messages occurred. This check adds some overhead, so it may
                be disabled in cases seeking extreme performance. Default: True
            fetcher_timeout (float): number of seconds to poll necessity to
                send next fetch request. Default: 0.1
            prefetch_backoff (float): number of seconds to wait before
                fetching more data for a partition that still holds
                unconsumed prefetched records. Default: 0.1
        """
        self._client = client
        self._loop = loop
        self._key_deserializer = key_deserializer
        self._value_deserializer = value_deserializer
        self._fetch_min_bytes = fetch_min_bytes
        self._fetch_max_wait_ms = fetch_max_wait_ms
        self._max_partition_fetch_bytes = max_partition_fetch_bytes
        self._check_crcs = check_crcs
        self._fetcher_timeout = fetcher_timeout
        self._prefetch_backoff = prefetch_backoff
        self._subscriptions = subscriptions
        # Buffered FetchResult/FetchError per TopicPartition, arrival order.
        self._records = collections.OrderedDict()
        # Nodes with an in-flight FetchRequest (at most one per node).
        self._in_flight = set()
        self._fetch_tasks = set()
        self._wait_consume_future = None
        self._wait_empty_future = None
        # Background task that keeps prefetching; cancelled by close().
        self._fetch_task = ensure_future(
            self._fetch_requests_routine(), loop=loop)
@asyncio.coroutine
def close(self):
self._fetch_task.cancel()
try:
yield from self._fetch_task
except asyncio.CancelledError:
pass
for x in self._fetch_tasks:
x.cancel()
try:
yield from x
except asyncio.CancelledError:
pass
    @asyncio.coroutine
    def _fetch_requests_routine(self):
        """ Background task, that always prefetches next result page.
        The algorithm:
        * Group partitions per node, which is the leader for it.
        * If all partitions for this node need prefetch - do it right alway
        * If any partition has some data (in `self._records`) wait up till
          `fetcher_timeout` so application can consume data from it.
        * If data in `self._records` is not consumed up to
          `fetcher_timeout` just request data for other partitions from this
          node.
        We request data in such manner cause Kafka blocks the connection if
        we perform a FetchRequest and we don't have enough data. This means
        we must perform a FetchRequest to as many partitions as we can in a
        node.
        Original java Kafka client processes data differently, as it only
        prefetches data if all messages were given to application (i.e. if
        `self._records` are empty). We don't use this method, cause we allow
        to process partitions separately (by passing `partitions` list to
        `getall()` call of the consumer), which can end up in a long wait
        if some partitions (or topics) are processed slower, than others.
        """
        try:
            while True:
                # Reset consuming signal future.
                self._wait_consume_future = asyncio.Future(loop=self._loop)
                # Create and send fetch requests
                requests, timeout = self._create_fetch_requests()
                for node_id, request in requests:
                    node_ready = yield from self._client.ready(node_id)
                    if not node_ready:
                        # We will request it on next routine
                        continue
                    log.debug("Sending FetchRequest to node %s", node_id)
                    task = ensure_future(
                        self._proc_fetch_request(node_id, request),
                        loop=self._loop)
                    self._fetch_tasks.add(task)
                    # Node stays marked busy until _proc_fetch_request's
                    # finally block removes it from _in_flight.
                    self._in_flight.add(node_id)
                # Wait until either a fetch task completes or a consumer
                # takes buffered data (signalled via _wait_consume_future),
                # but no longer than `timeout`.
                done_set, _ = yield from asyncio.wait(
                    chain(self._fetch_tasks, [self._wait_consume_future]),
                    loop=self._loop,
                    timeout=timeout,
                    return_when=asyncio.FIRST_COMPLETED)
                # Process fetch tasks results if any
                done_fetches = self._fetch_tasks.intersection(done_set)
                if done_fetches:
                    has_new_data = any(fut.result() for fut in done_fetches)
                    if has_new_data:
                        # we added some messages to self._records,
                        # wake up getters
                        self._notify(self._wait_empty_future)
                    self._fetch_tasks -= done_fetches
        except asyncio.CancelledError:
            pass
        except Exception:  # noqa
            log.error("Unexpected error in fetcher routine", exc_info=True)
def _notify(self, future):
if future is not None and not future.done():
future.set_result(None)
    def _create_fetch_requests(self):
        """Create fetch requests for all assigned partitions, grouped by node.
        FetchRequests skipped if:
        * no leader, or node has already fetches in flight
        * we have data for this partition
        * we have data for other partitions on this node
        Returns:
            tuple: ``(requests, timeout)`` where ``requests`` is a list of
                ``(node_id, FetchRequest)`` pairs ready to be sent and
                ``timeout`` is the number of seconds to wait before trying
                again.
        """
        if self._subscriptions.needs_partition_assignment:
            # NOTE(review): returns {} here vs a list below; both iterate
            # empty so callers work, but a list would be consistent.
            return {}, self._fetcher_timeout
        # create the fetch info as a dict of lists of partition info tuples
        # which can be passed to FetchRequest() via .items()
        fetchable = collections.defaultdict(
            lambda: collections.defaultdict(list))
        backoff_by_nodes = collections.defaultdict(list)
        fetchable_partitions = self._subscriptions.fetchable_partitions()
        for tp in fetchable_partitions:
            node_id = self._client.cluster.leader_for_partition(tp)
            if tp in self._records:
                record = self._records[tp]
                # Calculate backoff for this node if data is only recently
                # fetched. If data is consumed before backoff we will
                # include this partition in this fetch request
                backoff = record.calculate_backoff()
                if backoff:
                    backoff_by_nodes[node_id].append(backoff)
                # We have some prefetched data for this partition already
                continue
            if node_id in self._in_flight:
                # We have in-flight fetches to this node
                continue
            if node_id is None or node_id == -1:
                log.debug("No leader found for partition %s."
                          " Waiting metadata update", tp)
            else:
                # fetch if there is a leader and no in-flight requests
                position = self._subscriptions.assignment[tp].position
                partition_info = (
                    tp.partition,
                    position,
                    self._max_partition_fetch_bytes)
                fetchable[node_id][tp.topic].append(partition_info)
                log.debug(
                    "Adding fetch request for partition %s at offset %d",
                    tp, position)
        requests = []
        for node_id, partition_data in fetchable.items():
            if node_id in backoff_by_nodes:
                # At least one partition is still waiting to be consumed
                continue
            req = FetchRequest(
                -1,  # replica_id
                self._fetch_max_wait_ms,
                self._fetch_min_bytes,
                partition_data.items())
            requests.append((node_id, req))
        if backoff_by_nodes:
            # Return min time til any node will be ready to send event
            # (max of it's backoffs)
            backoff = min(map(max, backoff_by_nodes.values()))
        else:
            backoff = self._fetcher_timeout
        return requests, backoff
    @asyncio.coroutine
    def _proc_fetch_request(self, node_id, request):
        """Send one FetchRequest to `node_id` and buffer the response.

        Returns:
            bool: True when new records or errors were buffered in
                `self._records`, signalling that waiting getters should be
                woken up.
        """
        needs_wakeup = False
        try:
            response = yield from self._client.send(node_id, request)
        except Errors.KafkaError as err:
            log.error("Failed fetch messages from %s: %s", node_id, err)
            return False
        finally:
            # Always release the node so the routine may fetch from it again.
            self._in_flight.remove(node_id)
        # Remember the offset each partition was requested at, so responses
        # can be matched back to their fetch position.
        fetch_offsets = {}
        for topic, partitions in request.topics:
            for partition, offset, _ in partitions:
                fetch_offsets[TopicPartition(topic, partition)] = offset
        for topic, partitions in response.topics:
            for partition, error_code, highwater, messages in partitions:
                tp = TopicPartition(topic, partition)
                error_type = Errors.for_code(error_code)
                if not self._subscriptions.is_fetchable(tp):
                    # this can happen when a rebalance happened
                    log.debug("Ignoring fetched records for partition %s"
                              " since it is no longer fetchable", tp)
                elif error_type is Errors.NoError:
                    self._subscriptions.assignment[tp].highwater = highwater
                    # we are interested in this fetch only if the beginning
                    # offset matches the current consumed position
                    fetch_offset = fetch_offsets[tp]
                    partial = None
                    if messages and \
                            isinstance(messages[-1][-1], PartialMessage):
                        # A trailing partial message means the last message
                        # did not fit into the fetch size.
                        partial = messages.pop()
                    if messages:
                        log.debug(
                            "Adding fetched record for partition %s with"
                            " offset %d to buffered record list",
                            tp, fetch_offset)
                        try:
                            messages = collections.deque(
                                self._unpack_message_set(tp, messages))
                        except Errors.InvalidMessageError as err:
                            self._set_error(tp, err)
                            continue
                        self._records[tp] = FetchResult(
                            tp, messages=messages,
                            subscriptions=self._subscriptions,
                            backoff=self._prefetch_backoff,
                            loop=self._loop)
                        # We added at least 1 successful record
                        needs_wakeup = True
                    elif partial:
                        # we did not read a single message from a non-empty
                        # buffer because that message's size is larger than
                        # fetch size, in this case record this exception
                        err = RecordTooLargeError(
                            "There are some messages at [Partition=Offset]: "
                            "%s=%s whose size is larger than the fetch size %s"
                            " and hence cannot be ever returned. "
                            "Increase the fetch size, or decrease the maximum "
                            "message size the broker will allow.",
                            tp, fetch_offset, self._max_partition_fetch_bytes)
                        self._set_error(tp, err)
                        needs_wakeup = True
                        # NOTE(review): position is advanced past the
                        # too-large message so the consumer is not stuck on
                        # it — confirm offset semantics for skipped records.
                        self._subscriptions.assignment[tp].position += 1
                elif error_type in (Errors.NotLeaderForPartitionError,
                                    Errors.UnknownTopicOrPartitionError):
                    # Leadership/topology changed — refresh metadata.
                    self._client.force_metadata_update()
                elif error_type is Errors.OffsetOutOfRangeError:
                    fetch_offset = fetch_offsets[tp]
                    if self._subscriptions.has_default_offset_reset_policy():
                        self._subscriptions.need_offset_reset(tp)
                    else:
                        err = Errors.OffsetOutOfRangeError({tp: fetch_offset})
                        self._set_error(tp, err)
                        needs_wakeup = True
                    log.info(
                        "Fetch offset %s is out of range, resetting offset",
                        fetch_offset)
                elif error_type is Errors.TopicAuthorizationFailedError:
                    log.warn("Not authorized to read from topic %s.", tp.topic)
                    err = Errors.TopicAuthorizationFailedError(tp.topic)
                    self._set_error(tp, err)
                    needs_wakeup = True
                else:
                    log.warn('Unexpected error while fetching data: %s',
                             error_type.__name__)
        return needs_wakeup
def _set_error(self, tp, error):
assert tp not in self._records
self._records[tp] = FetchError(
error=error, backoff=self._prefetch_backoff, loop=self._loop)
    @asyncio.coroutine
    def update_fetch_positions(self, partitions):
        """Update the fetch positions for the provided partitions.
        Arguments:
            partitions (list of TopicPartitions): partitions to update
        Raises:
            NoOffsetForPartitionError: if no offset is stored for a given
                partition and no reset policy is available
        """
        futures = []
        # reset the fetch position to the committed position
        for tp in partitions:
            if not self._subscriptions.is_assigned(tp):
                log.warning("partition %s is not assigned - skipping offset"
                            " update", tp)
                continue
            elif self._subscriptions.is_fetchable(tp):
                # A fetchable partition already has a valid position.
                log.warning(
                    "partition %s is still fetchable -- skipping offset"
                    " update", tp)
                continue
            if self._subscriptions.is_offset_reset_needed(tp):
                futures.append(self._reset_offset(tp))
            elif self._subscriptions.assignment[tp].committed is None:
                # there's no committed position, so we need to reset with the
                # default strategy
                self._subscriptions.need_offset_reset(tp)
                futures.append(self._reset_offset(tp))
            else:
                committed = self._subscriptions.assignment[tp].committed
                log.debug("Resetting offset for partition %s to the committed"
                          " offset %s", tp, committed)
                self._subscriptions.seek(tp, committed)
        if futures:
            done, _ = yield from asyncio.wait(
                futures, return_when=asyncio.ALL_COMPLETED, loop=self._loop)
            # retrieve task result, can raise exception
            [x.result() for x in done]
    @asyncio.coroutine
    def _reset_offset(self, partition):
        """Reset offsets for the given partition using
        the offset reset strategy.
        Arguments:
            partition (TopicPartition): the partition that needs reset offset
        Raises:
            NoOffsetForPartitionError: if no offset reset strategy is defined
        """
        # The reset strategy doubles as the special timestamp value sent in
        # the OffsetRequest (EARLIEST/LATEST sentinels).
        timestamp = self._subscriptions.assignment[partition].reset_strategy
        if timestamp is OffsetResetStrategy.EARLIEST:
            strategy = 'earliest'
        else:
            strategy = 'latest'
        log.debug("Resetting offset for partition %s to %s offset.",
                  partition, strategy)
        offset = yield from self._offset(partition, timestamp)
        # we might lose the assignment while fetching the offset,
        # so check it is still active
        if self._subscriptions.is_assigned(partition):
            self._subscriptions.seek(partition, offset)
    @asyncio.coroutine
    def _offset(self, partition, timestamp):
        """Fetch a single offset before the given timestamp for the partition.
        Blocks until offset is obtained or a non-retriable exception is raised
        Arguments:
            partition The partition that needs fetching offset.
            timestamp (int): timestamp for fetching offset. -1 for the latest
                available, -2 for the earliest available. Otherwise timestamp
                is treated as epoch seconds.
        Returns:
            int: message offset
        """
        # Retry loop: retriable errors loop again (forcing a metadata
        # refresh when the error indicates stale metadata); non-retriable
        # errors propagate to the caller.
        while True:
            try:
                offset = yield from self._proc_offset_request(
                    partition, timestamp)
            except Errors.KafkaError as error:
                if not error.retriable:
                    raise error
                if error.invalid_metadata:
                    yield from self._client.force_metadata_update()
            else:
                return offset
    @asyncio.coroutine
    def _proc_offset_request(self, partition, timestamp):
        """Fetch a single offset before the given timestamp for the partition.
        Issues one OffsetRequest to the partition leader and interprets the
        response; raises retriable metadata errors so the caller can retry.
        Arguments:
            partition (TopicPartition): partition that needs fetching offset
            timestamp (int): timestamp for fetching offset
        Returns:
            Future: resolves to the corresponding offset
        """
        node_id = self._client.cluster.leader_for_partition(partition)
        if node_id is None:
            log.debug("Partition %s is unknown for fetching offset,"
                      " wait for metadata refresh", partition)
            raise Errors.StaleMetadata(partition)
        elif node_id == -1:
            # -1 is the broker's "no leader" sentinel.
            log.debug(
                "Leader for partition %s unavailable for fetching offset,"
                " wait for metadata refresh", partition)
            raise Errors.LeaderNotAvailableError(partition)
        # replica_id=-1 (consumer), max_num_offsets=1.
        request = OffsetRequest(
            -1, [(partition.topic, [(partition.partition, timestamp, 1)])]
        )
        if not (yield from self._client.ready(node_id)):
            raise Errors.NodeNotReadyError(node_id)
        response = yield from self._client.send(node_id, request)
        topic, partition_info = response.topics[0]
        assert len(response.topics) == 1 and len(partition_info) == 1, (
            'OffsetResponse should only be for a single topic-partition')
        part, error_code, offsets = partition_info[0]
        assert topic == partition.topic and part == partition.partition, (
            'OffsetResponse partition does not match OffsetRequest partition')
        error_type = Errors.for_code(error_code)
        if error_type is Errors.NoError:
            assert len(offsets) == 1, 'Expected OffsetResponse with one offset'
            offset = offsets[0]
            log.debug("Fetched offset %d for partition %s", offset, partition)
            return offset
        elif error_type in (Errors.NotLeaderForPartitionError,
                            Errors.UnknownTopicOrPartitionError):
            # Stale leadership info -- retriable; caller refreshes metadata.
            log.warning("Attempt to fetch offsets for partition %s failed due"
                        " to obsolete leadership information, retrying.",
                        partition)
            raise error_type(partition)
        else:
            log.error(
                "Attempt to fetch offsets for partition %s failed due to:"
                " %s", partition, error_type)
            raise error_type(partition)
    @asyncio.coroutine
    def next_record(self, partitions):
        """Return a single fetched record (waiting for one if necessary).
        This method will contain a little overhead as we will do more work this
        way:
            * Notify prefetch routine per every consumed partition
            * Assure message marked for autocommit
        """
        for tp in list(self._records.keys()):
            # An empty `partitions` means "any partition".
            if partitions and tp not in partitions:
                continue
            res_or_error = self._records[tp]
            if type(res_or_error) == FetchResult:
                message = res_or_error.getone()
                if message is None:
                    # We already processed all messages, request new ones
                    del self._records[tp]
                    self._notify(self._wait_consume_future)
                else:
                    return message
            else:
                # Remove error, so we can fetch on partition again
                del self._records[tp]
                self._notify(self._wait_consume_future)
                res_or_error.check_raise()
        # No messages ready. Wait for some to arrive
        if self._wait_empty_future is None or self._wait_empty_future.done():
            self._wait_empty_future = asyncio.Future(loop=self._loop)
        yield from self._wait_empty_future
        # Recurse: the prefetcher resolved the future, so records or an
        # error are now buffered for at least one partition.
        return (yield from self.next_record(partitions))
    @asyncio.coroutine
    def fetched_records(self, partitions, timeout=0):
        """ Returns previously fetched records and updates consumed offsets.
        Drains every buffered FetchResult into a dict keyed by partition.
        A buffered error is raised immediately only if nothing was drained
        first; otherwise the drained records are returned and the error is
        raised on a later call.
        """
        drained = {}
        for tp in list(self._records.keys()):
            # An empty `partitions` means "any partition".
            if partitions and tp not in partitions:
                continue
            res_or_error = self._records[tp]
            if type(res_or_error) == FetchResult:
                drained[tp] = res_or_error.getall()
                # We processed all messages - request new ones
                del self._records[tp]
                self._notify(self._wait_consume_future)
            else:
                # We already got some of messages from other partition -
                # return them. We will raise this error on next call
                if drained:
                    return drained
                else:
                    # Remove error, so we can fetch on partition again
                    del self._records[tp]
                    self._notify(self._wait_consume_future)
                    res_or_error.check_raise()
        if drained or not timeout:
            return drained
        # Nothing buffered: wait up to `timeout` for the prefetcher.
        if self._wait_empty_future is None or self._wait_empty_future.done():
            self._wait_empty_future = asyncio.Future(loop=self._loop)
        done, _ = yield from asyncio.wait(
            [self._wait_empty_future], timeout=timeout, loop=self._loop)
        if done:
            # Retry once without waiting; records should now be buffered.
            return (yield from self.fetched_records(partitions, 0))
        return {}
def _unpack_message_set(self, tp, messages):
for offset, size, msg in messages:
if self._check_crcs and not msg.validate_crc():
raise Errors.InvalidMessageError(msg)
elif msg.is_compressed():
yield from self._unpack_message_set(tp, msg.decompress())
else:
key, value = self._deserialize(msg)
yield ConsumerRecord(
tp.topic, tp.partition, offset, key, value)
def _deserialize(self, msg):
if self._key_deserializer:
key = self._key_deserializer(msg.key)
else:
key = msg.key
if self._value_deserializer:
value = self._value_deserializer(msg.value)
else:
value = msg.value
return key, value
| |
#!/usr/bin/env python
#
# Bootstrap PySDL2 by fetching SDL2 binaries (Windows)
# and the PySDL2 library itself.
#
# Use the `.locally` bootstrap code to securely fetch SDL2
# binaries using a known hash/size combination, unpack
# them locally and optionally run PySDL2 tests on them.
# This stuff is placed into public domain by
# anatoly techtonik <techtonik@gmail.com>
# --- bootstrap .locally --
#
# this creates .locally/ subdirectory in the script's dir
# and sets a few global variables for convenience:
#
# ROOT - absolute path to source code checkout dir
# LOOT - absolute path to the .locally/ subdir
#
# this provides some helpers:
#
#   localdir(name) - returns absolute path to the new `name`
#                    dir created in .locally/
# extract_zip(zippath, subdir, target)
# - extracts subdir from the zip file
# getsecure(names)
# - download files and check hash/size
# Download manifest: one row per artifact, addressed by its `name` key.
FILESPEC = [
    dict(
        name='pysdl2',
        filename='PySDL2-0.9.3.zip',
        hashsize='030f2351d1da387f878e14c8a86e08571d7baf5b 1085634',
        url='https://bitbucket.org/marcusva/py-sdl2/downloads/PySDL2-0.9.3.zip',
        checkpath='PySDL2'
    ),
    dict(
        name='sdl2-win32',
        filename='SDL2-2.0.4-win32-x86.zip',
        hashsize='ae79e23874c14b911a2a1bf91f234fe17a8ffb1f 410598',
        url='http://www.libsdl.org/release/SDL2-2.0.4-win32-x86.zip',
        checkpath='PySDL2/SDL2.dll'
    ),
    dict(
        name='sdl2-win64',
        filename='SDL2-2.0.3-win32-x64.zip',
        hashsize='b5c7dcb5d13c480ff5133691ffd96e28e8cb75fa 462897',
        url='http://www.libsdl.org/release/SDL2-2.0.3-win32-x64.zip',
        checkpath='PySDL2/SDL2.dll'
    ),
]
# convenient access to table cells as lookup('pysdl2', 'path')
def lookup(name, field, table=FILESPEC):
    """Return `field` of the `table` row whose name matches, else None."""
    row = next((r for r in table if r['name'] == name), None)
    return row[field] if row is not None else None
import os
import sys
try:
from urllib import urlretrieve # Python 2
except ImportError:
from urllib.request import urlretrieve
# 1. create .locally subdir
# ROOT: directory containing this script; LOOT: the download/cache dir.
# NOTE: creating LOOT is a module-import side effect.
ROOT = os.path.abspath(os.path.dirname(__file__))
LOOT = os.path.join(ROOT, '.locally/')
if not os.path.exists(LOOT):
    os.mkdir(LOOT)
def localdir(name, base=None):
    '''Create dir `name` under `base` if needed; return path ending in os.sep.

    `base` defaults to LOOT at call time, preserving the historical
    behaviour while letting callers (and tests) target another root.
    '''
    if base is None:
        base = LOOT
    # os.path.join with a trailing '' appends exactly one separator; the
    # old string concatenation produced a doubled slash because LOOT
    # already ends with one.
    nupath = os.path.join(base, name, '')
    if not os.path.exists(nupath):
        os.makedirs(nupath)
    return nupath
# 2. helpers for secure download with hash and size check
from hashlib import sha1
from os.path import exists, getsize, join
def hashsize(path):
    """Return '<sha1-hexdigest> <size-in-bytes>' for the file at `path`.

    Hashes in fixed-size chunks so large downloads do not have to fit
    in memory at once (the old code read the whole file in one call).
    """
    size = getsize(path)
    h = sha1()
    with open(path, 'rb') as source:
        # iter() with a b'' sentinel yields chunks until EOF.
        for chunk in iter(lambda: source.read(65536), b''):
            h.update(chunk)
    return '%s %s' % (h.hexdigest(), size)
class HashSizeCheckFailed(Exception):
    '''Raised when a downloaded file fails the hash/size check.'''
    pass
def getsecure(names, targetdir=LOOT):
    """Download each FILESPEC entry named in `names` into `targetdir`.

    Files already present on disk are not re-downloaded.  When a row
    carries a `hashsize` value the file is verified against it; on
    mismatch the file is removed (only if downloaded by this call) and
    HashSizeCheckFailed is raised.
    """
    def check(filepath, shize):
        # Hash the file once and reuse the result; the old code hashed
        # it a second time just to build the error message.
        actual = hashsize(filepath)
        if actual != shize:
            raise HashSizeCheckFailed(
                'Hash/Size mismatch for %s\n exp: %s\n act: %s'
                % (filepath, shize, actual))
    for name in names:
        f = lookup(name, 'filename')
        shize = lookup(name, 'hashsize')
        url = lookup(name, 'url')
        filepath = join(targetdir, f)
        downloaded = False
        if exists(filepath):
            print("Downloading " + f + " skipped (already downloaded)")
        else:
            print("Downloading %s into %s" % (f, targetdir))
            urlretrieve(url, filepath)
            downloaded = True
        if not shize:
            print("Hash/size is not set, check skipped..")
        else:
            try:
                check(filepath, shize)
            except HashSizeCheckFailed:
                # Keep a pre-existing file for inspection, but delete a
                # corrupt file we fetched ourselves so a rerun retries.
                if downloaded:
                    os.remove(filepath)
                raise
# --- /bootstrap
# 3. downloading PySDL2 and SDL2 binary
#    [x] Windows 32/64
#    [ ] Linux / Mac OS
files = ['pysdl2']
# sys.maxsize is 2**31-1 on 32-bit Python builds, larger on 64-bit ones.
is_32bits = not (sys.maxsize > 2**32)
if is_32bits:
    files.append('sdl2-win32')
else:
    files.append('sdl2-win64')
print('Downloading PySDL2 and binary for SDL2 lib..')
getsecure(files)
def extract_zip(zippath, subdir, target):
    '''Extract entries from `subdir` of the zip at `zippath` into the
    `target`/ directory.  An empty `subdir` extracts the whole archive;
    otherwise the `subdir` prefix is stripped from extracted paths.
    '''
    from os.path import join, exists, dirname
    import shutil
    import zipfile
    # `with` guarantees every handle is closed even if extraction fails
    # part-way (the old code leaked handles on any exception).
    with zipfile.ZipFile(zippath) as zf:
        dirs = set()  # cache to speed up dir creation
        for entry in zf.namelist():
            if subdir:
                if not entry.startswith(subdir + '/'):
                    continue
                outfilename = join(target, entry.replace(subdir + '/', ''))
            else:
                outfilename = join(target, entry)
            # create directory for directory entry
            if outfilename.endswith('/'):
                if not exists(outfilename):
                    os.makedirs(outfilename)
                continue
            # some .zip files don't have directory entries
            outdir = dirname(outfilename)
            if (outdir not in dirs) and not exists(outdir):
                os.makedirs(outdir)
                dirs.add(outdir)
            print(outfilename)
            with zf.open(entry) as infile, \
                    open(outfilename, "wb") as outfile:
                shutil.copyfileobj(infile, outfile)
# unpack everything into PySDL2 subdir
SDL2DIR = localdir('PySDL2')
# and add it to sys.path to make importable
sys.path.insert(0, SDL2DIR)
# PYTHONPATH is needed for test subprocesses
os.environ['PYTHONPATH'] = SDL2DIR
# Pick the SDL2 binary zip matching this interpreter's bitness.
if is_32bits:
    zipname = lookup('sdl2-win32', 'filename')
else:
    zipname = lookup('sdl2-win64', 'filename')
zippath = LOOT + zipname
# README-SDL.txt doubles as the "already extracted" marker for SDL2.
if os.path.exists(SDL2DIR + 'README-SDL.txt'):
    print("..SDL2 already extracted.")
else:
    print("..extracting SDL2..")
    extract_zip(zippath, '', SDL2DIR)
zipname = lookup('pysdl2', 'filename')
zippath = LOOT + zipname
# The PySDL2 zip nests everything under 'PySDL2-<version>/'.
subdir = zipname.rsplit('.', 1)[0]
# README.txt doubles as the "already extracted" marker for PySDL2.
if os.path.exists(SDL2DIR + 'README.txt'):
    print("..PySDL2 already extracted.")
else:
    print("..extracting PySDL2..")
    extract_zip(zippath, subdir, SDL2DIR)
print("..done.")
# 4. import PySDL2
# specify location for SDL2 binaries
print("Setting PYSDL2_DLL_PATH to %s" % SDL2DIR)
os.environ["PYSDL2_DLL_PATH"] = SDL2DIR
# getting versions
import sdl2
import ctypes
pysdl2_version = sdl2.__version__
# SDL_GetVersion fills the struct in place via a ctypes byref pointer.
sdl2_version = sdl2.SDL_version()
sdl2.SDL_GetVersion(ctypes.byref(sdl2_version))
print("Imported PySDL2 %s with SDL %s.%s.%s" % (sdl2.__version__,
    sdl2_version.major, sdl2_version.minor, sdl2_version.patch))
print("Bootstrap complete.")
# 5. run tests if executed standalone
if __name__ == '__main__':
    print("Press Enter to run tests for PySDL2..")
    try:
        raw_input() # Python 2
    except NameError:
        input()
    print("Running tests for PySDL2..")
    # launch application code
    import sdl2.test
    sdl2.test.util.runtests.run()
    print("Done.")
| |
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from os import path
import typing as T
from . import ExtensionModule, ModuleReturnValue
from .. import build
from .. import mesonlib
from .. import mlog
from ..interpreter.type_checking import CT_BUILD_BY_DEFAULT, CT_INPUT_KW, CT_INSTALL_DIR_KW, CT_INSTALL_TAG_KW, CT_OUTPUT_KW, INSTALL_KW, NoneType, in_set_validator
from ..interpreterbase import FeatureNew
from ..interpreterbase.decorators import ContainerTypeInfo, KwargInfo, noPosargs, typed_kwargs, typed_pos_args
from ..scripts.gettext import read_linguas
if T.TYPE_CHECKING:
from typing_extensions import Literal, TypedDict
from . import ModuleState
from ..build import Target
from ..interpreter import Interpreter
from ..interpreterbase import TYPE_var
from ..mparser import BaseNode
from ..programs import ExternalProgram
class MergeFile(TypedDict):
    # Typed view of the keyword arguments accepted by i18n.merge_file(),
    # matching the typed_kwargs() declaration on that method.
    input: T.List[T.Union[
        str, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex,
        build.ExtractedObjects, build.GeneratedList, ExternalProgram,
        mesonlib.File]]
    output: T.List[str]
    build_by_default: bool
    install: bool
    install_dir: T.List[T.Union[str, bool]]
    install_tag: T.List[str]
    args: T.List[str]
    data_dirs: T.List[str]
    po_dir: str
    type: Literal['xml', 'desktop']
class Gettext(TypedDict):
    # Typed view of the keyword arguments accepted by i18n.gettext(),
    # matching the typed_kwargs() declaration on that method.
    args: T.List[str]
    data_dirs: T.List[str]
    install: bool
    install_dir: T.Optional[str]
    languages: T.List[str]
    preset: T.Optional[str]
class ItsJoinFile(TypedDict):
    # Typed view of the keyword arguments accepted by i18n.itstool_join(),
    # matching the typed_kwargs() declaration on that method.
    input: T.List[T.Union[
        str, build.BuildTarget, build.CustomTarget, build.CustomTargetIndex,
        build.ExtractedObjects, build.GeneratedList, ExternalProgram,
        mesonlib.File]]
    output: T.List[str]
    build_by_default: bool
    install: bool
    install_dir: T.List[T.Union[str, bool]]
    install_tag: T.List[str]
    its_files: T.List[str]
    mo_targets: T.List[T.Union[build.BuildTarget, build.CustomTarget, build.CustomTargetIndex]]
# Shared KwargInfo declarations reused by several methods below via
# .evolve() (which stamps the per-method `since` version).
_ARGS: KwargInfo[T.List[str]] = KwargInfo(
    'args',
    ContainerTypeInfo(list, str),
    default=[],
    listify=True,
)
_DATA_DIRS: KwargInfo[T.List[str]] = KwargInfo(
    'data_dirs',
    ContainerTypeInfo(list, str),
    default=[],
    listify=True
)
# Canned xgettext argument sets selectable via gettext(preset: ...).
PRESET_ARGS = {
    'glib': [
        '--from-code=UTF-8',
        '--add-comments',
        # https://developer.gnome.org/glib/stable/glib-I18N.html
        '--keyword=_',
        '--keyword=N_',
        '--keyword=C_:1c,2',
        '--keyword=NC_:1c,2',
        '--keyword=g_dcgettext:2',
        '--keyword=g_dngettext:2,3',
        '--keyword=g_dpgettext2:2c,3',
        '--flag=N_:1:pass-c-format',
        '--flag=C_:2:pass-c-format',
        '--flag=NC_:2:pass-c-format',
        '--flag=g_dngettext:2:pass-c-format',
        '--flag=g_strdup_printf:1:c-format',
        '--flag=g_string_printf:2:c-format',
        '--flag=g_string_append_printf:2:c-format',
        '--flag=g_error_new:3:c-format',
        '--flag=g_set_error:4:c-format',
        '--flag=g_markup_printf_escaped:1:c-format',
        '--flag=g_log:3:c-format',
        '--flag=g_print:1:c-format',
        '--flag=g_printerr:1:c-format',
        '--flag=g_printf:1:c-format',
        '--flag=g_fprintf:2:c-format',
        '--flag=g_sprintf:2:c-format',
        '--flag=g_snprintf:3:c-format',
    ]
}
class I18nModule(ExtensionModule):
    """The ``i18n`` Meson module.

    Exposes three build-definition methods:
      * ``merge_file``   -- merge translations into an XML/desktop file
      * ``gettext``      -- create .pot/.mo/update-po targets for a package
      * ``itstool_join`` -- join .mo translations into XML via itstool
    """
    def __init__(self, interpreter: 'Interpreter'):
        super().__init__(interpreter)
        self.methods.update({
            'merge_file': self.merge_file,
            'gettext': self.gettext,
            'itstool_join': self.itstool_join,
        })
        # External tool lookups are cached here and resolved lazily on
        # first use by each method.
        self.tools: T.Dict[str, T.Optional[ExternalProgram]] = {
            'itstool': None,
            'msgfmt': None,
            'msginit': None,
            'msgmerge': None,
            'xgettext': None,
        }
    @staticmethod
    def nogettext_warning(location: BaseNode) -> None:
        """Warn (once per build) that gettext tooling is missing."""
        mlog.warning('Gettext not found, all translation targets will be ignored.', once=True, location=location)
    @staticmethod
    def _get_data_dirs(state: 'ModuleState', dirs: T.Iterable[str]) -> T.List[str]:
        """Returns source directories of relative paths"""
        src_dir = path.join(state.environment.get_source_dir(), state.subdir)
        return [path.join(src_dir, d) for d in dirs]
    @FeatureNew('i18n.merge_file', '0.37.0')
    @noPosargs
    @typed_kwargs(
        'i18n.merge_file',
        CT_BUILD_BY_DEFAULT,
        CT_INPUT_KW,
        CT_INSTALL_DIR_KW,
        CT_INSTALL_TAG_KW,
        CT_OUTPUT_KW,
        INSTALL_KW,
        _ARGS.evolve(since='0.51.0'),
        _DATA_DIRS.evolve(since='0.41.0'),
        KwargInfo('po_dir', str, required=True),
        KwargInfo('type', str, default='xml', validator=in_set_validator({'xml', 'desktop'})),
    )
    def merge_file(self, state: 'ModuleState', args: T.List['TYPE_var'], kwargs: 'MergeFile') -> ModuleReturnValue:
        """Create a custom target that merges translations into an XML or
        desktop file through Meson's internal msgfmthelper script."""
        if self.tools['msgfmt'] is None:
            self.tools['msgfmt'] = state.find_program('msgfmt', for_machine=mesonlib.MachineChoice.BUILD)
        podir = path.join(state.build_to_src, state.subdir, kwargs['po_dir'])
        ddirs = self._get_data_dirs(state, kwargs['data_dirs'])
        datadirs = '--datadirs=' + ':'.join(ddirs) if ddirs else None
        command: T.List[T.Union[str, build.BuildTarget, build.CustomTarget,
                                build.CustomTargetIndex, 'ExternalProgram', mesonlib.File]] = []
        command.extend(state.environment.get_build_command())
        command.extend([
            '--internal', 'msgfmthelper',
            '--msgfmt=' + self.tools['msgfmt'].get_path(),
        ])
        if datadirs:
            command.append(datadirs)
        command.extend(['@INPUT@', '@OUTPUT@', kwargs['type'], podir])
        if kwargs['args']:
            command.append('--')
            command.extend(kwargs['args'])
        build_by_default = kwargs['build_by_default']
        if build_by_default is None:
            # Follow the install flag when the user did not specify.
            build_by_default = kwargs['install']
        ct = build.CustomTarget(
            '',
            state.subdir,
            state.subproject,
            command,
            kwargs['input'],
            kwargs['output'],
            build_by_default=build_by_default,
            install=kwargs['install'],
            install_dir=kwargs['install_dir'],
            install_tag=kwargs['install_tag'],
        )
        return ModuleReturnValue(ct, [ct])
    # BUG FIX: the positional-argument checker was registered under the
    # misspelled name 'i81n.gettext', so argument errors reported the
    # wrong method name.  Corrected to 'i18n.gettext'.
    @typed_pos_args('i18n.gettext', str)
    @typed_kwargs(
        'i18n.gettext',
        _ARGS,
        _DATA_DIRS.evolve(since='0.36.0'),
        INSTALL_KW.evolve(default=True),
        KwargInfo('install_dir', (str, NoneType), since='0.50.0'),
        KwargInfo('languages', ContainerTypeInfo(list, str), default=[], listify=True),
        KwargInfo(
            'preset',
            (str, NoneType),
            validator=in_set_validator(set(PRESET_ARGS)),
            since='0.37.0',
        ),
    )
    def gettext(self, state: 'ModuleState', args: T.Tuple[str], kwargs: 'Gettext') -> ModuleReturnValue:
        """Create the <pkg>-pot, per-language .mo, <pkg>-gmo alias and
        <pkg>-update-po targets for the given package name."""
        for tool in ['msgfmt', 'msginit', 'msgmerge', 'xgettext']:
            if self.tools[tool] is None:
                self.tools[tool] = state.find_program(tool, required=False, for_machine=mesonlib.MachineChoice.BUILD)
            # still not found?
            if not self.tools[tool].found():
                self.nogettext_warning(state.current_node)
                return ModuleReturnValue(None, [])
        packagename = args[0]
        pkg_arg = f'--pkgname={packagename}'
        languages = kwargs['languages']
        lang_arg = '--langs=' + '@@'.join(languages) if languages else None
        _datadirs = ':'.join(self._get_data_dirs(state, kwargs['data_dirs']))
        datadirs = f'--datadirs={_datadirs}' if _datadirs else None
        extra_args = kwargs['args']
        targets: T.List['Target'] = []
        gmotargets: T.List['build.CustomTarget'] = []
        preset = kwargs['preset']
        if preset:
            # Preset args come first; OrderedSet drops duplicates while
            # keeping a stable order.
            preset_args = PRESET_ARGS[preset]
            extra_args = list(mesonlib.OrderedSet(preset_args + extra_args))
        extra_arg = '--extra-args=' + '@@'.join(extra_args) if extra_args else None
        potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]
        if datadirs:
            potargs.append(datadirs)
        if extra_arg:
            potargs.append(extra_arg)
        potargs.append('--xgettext=' + self.tools['xgettext'].get_path())
        pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, state.subproject)
        targets.append(pottarget)
        install = kwargs['install']
        install_dir = kwargs['install_dir'] or state.environment.coredata.get_option(mesonlib.OptionKey('localedir'))
        assert isinstance(install_dir, str), 'for mypy'
        if not languages:
            # Fall back to the LINGUAS file next to the po sources.
            languages = read_linguas(path.join(state.environment.source_dir, state.subdir))
        for l in languages:
            po_file = mesonlib.File.from_source_file(state.environment.source_dir,
                                                     state.subdir, l+'.po')
            gmotarget = build.CustomTarget(
                f'{packagename}-{l}.mo',
                path.join(state.subdir, l, 'LC_MESSAGES'),
                state.subproject,
                [self.tools['msgfmt'], '@INPUT@', '-o', '@OUTPUT@'],
                [po_file],
                [f'{packagename}.mo'],
                install=install,
                # We have multiple files all installed as packagename+'.mo' in different install subdirs.
                # What we really wanted to do, probably, is have a rename: kwarg, but that's not available
                # to custom_targets. Crude hack: set the build target's subdir manually.
                # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir.
                install_dir=[path.join(install_dir, l, 'LC_MESSAGES')],
                install_tag=['i18n'],
            )
            targets.append(gmotarget)
            gmotargets.append(gmotarget)
        allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject)
        targets.append(allgmotarget)
        updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]
        if lang_arg:
            updatepoargs.append(lang_arg)
        if datadirs:
            updatepoargs.append(datadirs)
        if extra_arg:
            updatepoargs.append(extra_arg)
        for tool in ['msginit', 'msgmerge']:
            updatepoargs.append(f'--{tool}=' + self.tools[tool].get_path())
        updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)
        targets.append(updatepotarget)
        return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets)
    @FeatureNew('i18n.itstool_join', '0.62.0')
    @noPosargs
    @typed_kwargs(
        'i18n.itstool_join',
        CT_BUILD_BY_DEFAULT,
        CT_INPUT_KW,
        CT_INSTALL_DIR_KW,
        CT_INSTALL_TAG_KW,
        CT_OUTPUT_KW,
        INSTALL_KW,
        _ARGS.evolve(),
        KwargInfo('its_files', ContainerTypeInfo(list, str)),
        KwargInfo('mo_targets', ContainerTypeInfo(list, build.CustomTarget), required=True),
    )
    def itstool_join(self, state: 'ModuleState', args: T.List['TYPE_var'], kwargs: 'ItsJoinFile') -> ModuleReturnValue:
        """Create a custom target that joins .mo translations into an XML
        file using itstool (driven by Meson's internal wrapper script)."""
        if self.tools['itstool'] is None:
            self.tools['itstool'] = state.find_program('itstool', for_machine=mesonlib.MachineChoice.BUILD)
        mo_targets = kwargs['mo_targets']
        its_files = kwargs.get('its_files', [])
        mo_fnames = []
        for target in mo_targets:
            mo_fnames.append(path.join(target.get_subdir(), target.get_outputs()[0]))
        command: T.List[T.Union[str, build.BuildTarget, build.CustomTarget,
                                build.CustomTargetIndex, 'ExternalProgram', mesonlib.File]] = []
        command.extend(state.environment.get_build_command())
        command.extend([
            '--internal', 'itstool', 'join',
            '-i', '@INPUT@',
            '-o', '@OUTPUT@',
            '--itstool=' + self.tools['itstool'].get_path(),
        ])
        if its_files:
            for fname in its_files:
                # Relative .its paths are resolved against the source dir.
                if not path.isabs(fname):
                    fname = path.join(state.environment.source_dir, state.subdir, fname)
                command.extend(['--its', fname])
        command.extend(mo_fnames)
        build_by_default = kwargs['build_by_default']
        if build_by_default is None:
            build_by_default = kwargs['install']
        ct = build.CustomTarget(
            '',
            state.subdir,
            state.subproject,
            command,
            kwargs['input'],
            kwargs['output'],
            build_by_default=build_by_default,
            extra_depends=mo_targets,
            install=kwargs['install'],
            install_dir=kwargs['install_dir'],
            install_tag=kwargs['install_tag'],
        )
        return ModuleReturnValue(ct, [ct])
def initialize(interp: 'Interpreter') -> I18nModule:
    """Meson module entry point: construct the i18n module instance."""
    return I18nModule(interp)
| |
# Copyright (c) 2016 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mistral import context
from mistral import exceptions as exc
from mistral.tests.unit.rpc.kombu import base
from mistral.tests.unit.rpc.kombu import fake_kombu
import mock
import socket
from stevedore import driver
with mock.patch.dict('sys.modules', kombu=fake_kombu):
from mistral.rpc.kombu import kombu_server
class TestException(exc.MistralError):
    # Sentinel error raised from mocks to break out of the server's
    # otherwise-infinite consume loop in the tests below.
    pass
class KombuServerTestCase(base.KombuTestCase):
    """Unit tests for KombuRPCServer with the kombu transport mocked out
    (the fake_kombu module is patched into sys.modules at import time)."""
    def setUp(self):
        super(KombuServerTestCase, self).setUp()
        self.conf = mock.MagicMock()
        self.server = kombu_server.KombuRPCServer(self.conf)
        # Minimal stand-in context object exposing only to_dict().
        self.ctx = type('context', (object,), {'to_dict': lambda self: {}})()
    def test_is_running_is_running(self):
        self.server._running.set()
        self.assertTrue(self.server.is_running)
    def test_is_running_is_not_running(self):
        self.server._running.clear()
        self.assertFalse(self.server.is_running)
    def test_stop(self):
        self.server.stop()
        self.assertFalse(self.server.is_running)
    def test_publish_message(self):
        body = 'body'
        reply_to = 'reply_to'
        corr_id = 'corr_id'
        type = 'type'
        acquire_mock = mock.MagicMock()
        fake_kombu.producer.acquire.return_value = acquire_mock
        enter_mock = mock.MagicMock()
        acquire_mock.__enter__.return_value = enter_mock
        self.server.publish_message(body, reply_to, corr_id, type)
        # The body is JSON-serialized by the server before publishing.
        enter_mock.publish.assert_called_once_with(
            body={'body': '"body"'},
            exchange='openstack',
            routing_key=reply_to,
            correlation_id=corr_id,
            type=type,
            serializer='json'
        )
    def test_run_launch_successfully(self):
        # TestException escapes drain_events, proving the loop started.
        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = TestException()
        fake_kombu.connection.acquire.return_value = acquire_mock
        self.assertRaises(TestException, self.server._run, 'blocking')
        self.assertTrue(self.server.is_running)
    # NOTE(review): "than" in the name is likely a typo for "then";
    # renaming would change test discovery, so it is only noted here.
    def test_run_launch_successfully_than_stop(self):
        def side_effect(*args, **kwargs):
            # Server must be running while events are being drained.
            self.assertTrue(self.server.is_running)
            raise KeyboardInterrupt
        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = side_effect
        fake_kombu.connection.acquire.return_value = acquire_mock
        # KeyboardInterrupt is treated as a graceful stop, not an error.
        self.server._run('blocking')
        self.assertFalse(self.server.is_running)
        self.assertEqual(self.server._sleep_time, 1)
    def test_run_socket_error_reconnect(self):
        # First drain raises socket.error -> reconnect; second raises the
        # sentinel to exit the loop.
        def side_effect(*args, **kwargs):
            if acquire_mock.drain_events.call_count == 1:
                raise socket.error()
            raise TestException()
        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = side_effect
        fake_kombu.connection.acquire.return_value = acquire_mock
        self.assertRaises(TestException, self.server._run, 'blocking')
        self.assertEqual(self.server._sleep_time, 1)
    def test_run_socket_timeout_still_running(self):
        # socket.timeout is expected during idle periods and must not
        # stop the server.
        def side_effect(*args, **kwargs):
            if acquire_mock.drain_events.call_count == 0:
                raise socket.timeout()
            raise TestException()
        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = side_effect
        fake_kombu.connection.acquire.return_value = acquire_mock
        self.assertRaises(
            TestException,
            self.server._run,
            'blocking'
        )
        self.assertTrue(self.server.is_running)
    def test_run_keyboard_interrupt_not_running(self):
        acquire_mock = mock.MagicMock()
        acquire_mock.drain_events.side_effect = KeyboardInterrupt()
        fake_kombu.connection.acquire.return_value = acquire_mock
        self.assertIsNone(self.server.run())
        self.assertFalse(self.server.is_running)
    @mock.patch.object(
        kombu_server.KombuRPCServer,
        '_on_message',
        mock.MagicMock()
    )
    @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message')
    def test__on_message_safe_message_processing_ok(self, publish_message):
        message = mock.MagicMock()
        self.server._on_message_safe(None, message)
        # Message is always acked; no reply is published on success.
        self.assertEqual(message.ack.call_count, 1)
        self.assertEqual(publish_message.call_count, 0)
    @mock.patch.object(kombu_server.KombuRPCServer, '_on_message')
    @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message')
    def test__on_message_safe_message_processing_raise(
            self,
            publish_message,
            _on_message
    ):
        reply_to = 'reply_to'
        correlation_id = 'corr_id'
        message = mock.MagicMock()
        message.properties = {
            'reply_to': reply_to,
            'correlation_id': correlation_id
        }
        test_exception = TestException()
        _on_message.side_effect = test_exception
        self.server._on_message_safe(None, message)
        # Message is still acked; the error is published back to the caller.
        self.assertEqual(message.ack.call_count, 1)
        self.assertEqual(publish_message.call_count, 1)
    @mock.patch.object(
        kombu_server.KombuRPCServer,
        '_get_rpc_method',
        mock.MagicMock(return_value=None)
    )
    def test__on_message_rpc_method_not_found(self):
        request = {
            'rpc_ctx': {},
            'rpc_method': 'not_found_method',
            'arguments': {}
        }
        message = mock.MagicMock()
        message.properties = {
            'reply_to': None,
            'correlation_id': None
        }
        self.assertRaises(
            exc.MistralException,
            self.server._on_message,
            request,
            message
        )
    @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message')
    @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method')
    @mock.patch('mistral.context.MistralContext.from_dict')
    def test__on_message_is_async(self, mock_get_context, get_rpc_method,
                                  publish_message):
        result = 'result'
        request = {
            'async': True,
            'rpc_ctx': {},
            'rpc_method': 'found_method',
            'arguments': self.server._serialize_message({
                'a': 1,
                'b': 2
            })
        }
        message = mock.MagicMock()
        message.properties = {
            'reply_to': None,
            'correlation_id': None
        }
        message.delivery_info.get.return_value = False
        rpc_method = mock.MagicMock(return_value=result)
        get_rpc_method.return_value = rpc_method
        ctx = context.MistralContext()
        mock_get_context.return_value = ctx
        self.server._on_message(request, message)
        rpc_method.assert_called_once_with(
            rpc_ctx=ctx,
            a=1,
            b=2
        )
        # Async calls produce no reply message.
        self.assertEqual(publish_message.call_count, 0)
    @mock.patch.object(kombu_server.KombuRPCServer, 'publish_message')
    @mock.patch.object(kombu_server.KombuRPCServer, '_get_rpc_method')
    @mock.patch('mistral.context.MistralContext.from_dict')
    def test__on_message_is_sync(self, mock_get_context, get_rpc_method,
                                 publish_message):
        result = 'result'
        request = {
            'async': False,
            'rpc_ctx': {},
            'rpc_method': 'found_method',
            'arguments': self.server._serialize_message({
                'a': 1,
                'b': 2
            })
        }
        reply_to = 'reply_to'
        correlation_id = 'corr_id'
        message = mock.MagicMock()
        message.properties = {
            'reply_to': reply_to,
            'correlation_id': correlation_id
        }
        message.delivery_info.get.return_value = False
        rpc_method = mock.MagicMock(return_value=result)
        get_rpc_method.return_value = rpc_method
        ctx = context.MistralContext()
        mock_get_context.return_value = ctx
        self.server._on_message(request, message)
        rpc_method.assert_called_once_with(
            rpc_ctx=ctx,
            a=1,
            b=2
        )
        # Sync calls publish the result back to the reply queue.
        publish_message.assert_called_once_with(
            result,
            reply_to,
            correlation_id
        )
    @mock.patch('stevedore.driver.DriverManager')
    def test__prepare_worker(self, driver_manager_mock):
        worker_mock = mock.MagicMock()
        mgr_mock = mock.MagicMock()
        mgr_mock.driver.return_value = worker_mock
        def side_effect(*args, **kwargs):
            return mgr_mock
        driver_manager_mock.side_effect = side_effect
        self.server._prepare_worker('blocking')
        self.assertEqual(self.server._worker, worker_mock)
    @mock.patch('stevedore.driver.DriverManager')
    def test__prepare_worker_no_valid_executor(self, driver_manager_mock):
        # Unknown executor names propagate stevedore's NoMatches error.
        driver_manager_mock.side_effect = driver.NoMatches()
        self.assertRaises(
            driver.NoMatches,
            self.server._prepare_worker,
            'non_valid_executor'
        )
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import girder_client
import json
import mock
import os
import shutil
import six
import time
from six import StringIO
import hashlib
from girder import config, events
from tests import base
# Run the test server on a dedicated port (default 20200) so it cannot
# collide with a developer's normally-running Girder instance.
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_TEST_PORT', '20200')
config.loadConfig()  # Must reload config to pickup correct port
def setUpModule():
    """Start the shared Girder test server.

    Plugins listed (whitespace-separated) in the ENABLED_PLUGINS
    environment variable are enabled before startup.
    """
    requested = os.environ.get('ENABLED_PLUGINS', '')
    if requested:
        base.enabledPlugins.extend(requested.split())
    base.startServer(False)
def tearDownModule():
    """Stop the shared Girder test server started by setUpModule."""
    base.stopServer()
class PythonClientTestCase(base.TestCase):
    """End-to-end tests of the girder_client library against a live test server."""

    def setUp(self):
        base.TestCase.setUp(self)

        def writeFile(dirName):
            # Each test file's content is simply its own path, which makes
            # later size/content assertions deterministic.
            filename = os.path.join(dirName, 'f')
            f = open(filename, 'w')
            f.write(filename)
            f.close()

        # make some temp dirs and files
        self.libTestDir = os.path.join(os.path.dirname(__file__),
                                       '_libTestDir')
        # unlink old temp dirs and files first
        shutil.rmtree(self.libTestDir, ignore_errors=True)

        os.mkdir(self.libTestDir)
        writeFile(self.libTestDir)
        # Layout: _libTestDir/f plus sub0/f, sub1/f, sub2/f.
        for subDir in range(0, 3):
            subDirName = os.path.join(self.libTestDir, 'sub'+str(subDir))
            os.mkdir(subDirName)
            writeFile(subDirName)

    def tearDown(self):
        shutil.rmtree(self.libTestDir, ignore_errors=True)

        base.TestCase.tearDown(self)

    def testRestCore(self):
        """Exercise authentication paths and basic folder REST routes."""
        client = girder_client.GirderClient(port=os.environ['GIRDER_PORT'])

        # Register a user
        user = client.createResource('user', params={
            'firstName': 'First',
            'lastName': 'Last',
            'login': 'mylogin',
            'password': 'password',
            'email': 'email@email.com'
        })
        # The first registered user is made an admin automatically.
        self.assertTrue(user['admin'])

        # Test authentication with bad args
        flag = False
        try:
            client.authenticate()
        except Exception:
            flag = True

        self.assertTrue(flag)

        # Test authentication failure
        flag = False
        try:
            client.authenticate(username=user['login'], password='wrong')
        except girder_client.AuthenticationError:
            flag = True

        self.assertTrue(flag)

        # Interactive login (successfully)
        with mock.patch('six.moves.input', return_value=user['login']),\
                mock.patch('getpass.getpass', return_value='password'):
            client.authenticate(interactive=True)

        # /user/me should now return our user info
        user = client.getResource('user/me')
        self.assertEqual(user['login'], 'mylogin')

        # Test HTTP error case
        flag = False
        try:
            client.getResource('user/badId')
        except girder_client.HttpError as e:
            self.assertEqual(e.status, 400)
            self.assertEqual(e.method, 'GET')
            resp = json.loads(e.responseText)
            self.assertEqual(resp['type'], 'validation')
            self.assertEqual(resp['field'], 'id')
            self.assertEqual(resp['message'], 'Invalid ObjectId: badId')
            flag = True

        self.assertTrue(flag)

        # Test some folder routes
        folders = client.listFolder(
            parentId=user['_id'], parentFolderType='user')
        # New users get exactly a Public and a Private folder.
        self.assertEqual(len(folders), 2)

        privateFolder = publicFolder = None
        for folder in folders:
            if folder['name'] == 'Public':
                publicFolder = folder
            elif folder['name'] == 'Private':
                privateFolder = folder

        self.assertNotEqual(privateFolder, None)
        self.assertNotEqual(publicFolder, None)

        self.assertEqual(client.getFolder(privateFolder['_id']), privateFolder)

        acl = client.getFolderAccess(privateFolder['_id'])
        self.assertIn('users', acl)
        self.assertIn('groups', acl)

        # Writing back the same ACL should round-trip unchanged.
        client.setFolderAccess(privateFolder['_id'], json.dumps(acl),
                               public=False)
        self.assertEqual(acl, client.getFolderAccess(privateFolder['_id']))

        # Test recursive ACL propagation (not very robust test yet)
        client.createFolder(privateFolder['_id'], name='Subfolder')
        client.inheritAccessControlRecursive(privateFolder['_id'])

    def testUploadCallbacks(self):
        """Verify upload callbacks fire exactly once per folder and item."""
        callbackUser = self.model('user').createUser(
            firstName='Callback', lastName='Last', login='callback',
            password='password', email='Callback@email.com')
        callbackPublicFolder = six.next(self.model('folder').childFolders(
            parentType='user', parent=callbackUser, user=None, limit=1))
        callback_counts = {'folder': 0, 'item': 0}
        # Maps of path -> "callback seen" flags, filled by walking the tree.
        folders = {}
        items = {}
        folders[self.libTestDir] = False
        folder_count = 1  # 1 for self.libTestDir
        item_count = 0
        for root, dirs, files in os.walk(self.libTestDir):
            for name in files:
                items[os.path.join(root, name)] = False
                item_count += 1
            for name in dirs:
                folders[os.path.join(root, name)] = False
                folder_count += 1

        def folder_callback(folder, filepath):
            self.assertIn(filepath, six.viewkeys(folders))
            folders[filepath] = True
            callback_counts['folder'] += 1

        def item_callback(item, filepath):
            self.assertIn(filepath, six.viewkeys(items))
            items[filepath] = True
            callback_counts['item'] += 1

        client = girder_client.GirderClient(port=os.environ['GIRDER_PORT'])
        client.authenticate('callback', 'password')

        client.add_folder_upload_callback(folder_callback)
        client.add_item_upload_callback(item_callback)
        client.upload(self.libTestDir, callbackPublicFolder['_id'])

        # make sure counts are the same (callbacks not called more than once)
        # and that all folders and files have callbacks called on them
        self.assertEqual(folder_count, callback_counts['folder'])
        self.assertEqual(item_count, callback_counts['item'])
        self.assertTrue(all(six.viewvalues(items)))
        self.assertTrue(all(six.viewvalues(folders)))

        # Upload again with reuse_existing on
        existingList = list(self.model('folder').childFolders(
            parentType='folder', parent=callbackPublicFolder,
            user=callbackUser, limit=0))
        client.upload(self.libTestDir, callbackPublicFolder['_id'],
                      reuse_existing=True)
        newList = list(self.model('folder').childFolders(
            parentType='folder', parent=callbackPublicFolder,
            user=callbackUser, limit=0))
        # reuse_existing must not create duplicate folders.
        self.assertEqual(existingList, newList)
        self.assertEqual(len(newList), 1)
        self.assertEqual([f['name'] for f in self.model('folder').childFolders(
            parentType='folder', parent=newList[0],
            user=callbackUser, limit=0)], ['sub0', 'sub1', 'sub2'])

        # Test upload via a file-like object into a folder
        callbacks = []
        path = os.path.join(self.libTestDir, 'sub0', 'f')
        size = os.path.getsize(path)

        def progressCallback(info):
            callbacks.append(info)

        with open(path) as f:
            with self.assertRaises(girder_client.IncorrectUploadLengthError):
                try:
                    # Deliberately lie about the size to trigger the error.
                    client.uploadFile(
                        callbackPublicFolder['_id'], stream=f, name='test',
                        size=size + 1, parentType='folder')
                except girder_client.IncorrectUploadLengthError as exc:
                    self.assertEqual(
                        exc.upload['received'], exc.upload['size'] - 1)
                    upload = self.model('upload').load(exc.upload['_id'])
                    # The failed upload must not leave a partial record.
                    self.assertEqual(upload, None)
                    raise

        with open(path) as f:
            file = client.uploadFile(
                callbackPublicFolder['_id'], stream=f, name='test', size=size,
                parentType='folder', progressCallback=progressCallback)

        # A single small chunk yields exactly one progress callback.
        self.assertEqual(len(callbacks), 1)
        self.assertEqual(callbacks[0]['current'], size)
        self.assertEqual(callbacks[0]['total'], size)
        self.assertEqual(file['name'], 'test')
        self.assertEqual(file['size'], size)
        self.assertEqual(file['mimeType'], 'application/octet-stream')

        items = list(
            self.model('folder').childItems(folder=callbackPublicFolder))
        self.assertEqual(len(items), 1)
        self.assertEqual(items[0]['name'], 'test')

        files = list(self.model('item').childFiles(items[0]))
        self.assertEqual(len(files), 1)

    def testUploadReference(self):
        """Check that upload ``reference`` values flow into data.process events."""
        eventList = []
        client = girder_client.GirderClient(port=os.environ['GIRDER_PORT'])
        # Register a user
        user = client.createResource('user', params={
            'firstName': 'First',
            'lastName': 'Last',
            'login': 'mylogin',
            'password': 'password',
            'email': 'email@email.com'
        })
        client.authenticate('mylogin', 'password')
        folders = client.listFolder(
            parentId=user['_id'], parentFolderType='user', name='Public')
        publicFolder = folders[0]

        def processEvent(event):
            eventList.append(event.info)

        events.bind('data.process', 'lib_test', processEvent)

        path = os.path.join(self.libTestDir, 'sub0', 'f')
        size = os.path.getsize(path)
        client.uploadFile(publicFolder['_id'], open(path), name='test1',
                          size=size, parentType='folder',
                          reference='test1_reference')
        # Events are dispatched asynchronously; poll (up to ~5s) for the
        # daemon queue to drain before asserting.
        starttime = time.time()
        while (not events.daemon.eventQueue.empty() and
                time.time() - starttime < 5):
            time.sleep(0.05)
        self.assertEqual(len(eventList), 1)
        self.assertEqual(eventList[0]['reference'], 'test1_reference')

        client.uploadFileToItem(str(eventList[0]['file']['itemId']), path,
                                reference='test2_reference')
        while (not events.daemon.eventQueue.empty() and
                time.time() - starttime < 5):
            time.sleep(0.05)
        self.assertEqual(len(eventList), 2)
        self.assertEqual(eventList[1]['reference'], 'test2_reference')
        # Same item, same contents -> a second, distinct file.
        self.assertNotEqual(eventList[0]['file']['_id'],
                            eventList[1]['file']['_id'])

        # Grow the file so the next upload replaces the matching-name file.
        open(path, 'ab').write(b'test')
        size = os.path.getsize(path)
        client.uploadFileToItem(str(eventList[0]['file']['itemId']), path,
                                reference='test3_reference')
        while (not events.daemon.eventQueue.empty() and
                time.time() - starttime < 5):
            time.sleep(0.05)
        self.assertEqual(len(eventList), 3)
        self.assertEqual(eventList[2]['reference'], 'test3_reference')
        self.assertNotEqual(eventList[0]['file']['_id'],
                            eventList[2]['file']['_id'])
        self.assertEqual(eventList[1]['file']['_id'],
                         eventList[2]['file']['_id'])

    def testUploadContent(self):
        """Replace an existing file's contents and verify the stored hash."""
        client = girder_client.GirderClient(port=os.environ['GIRDER_PORT'])
        # Register a user
        user = client.createResource('user', params={
            'firstName': 'First',
            'lastName': 'Last',
            'login': 'mylogin',
            'password': 'password',
            'email': 'email@email.com'
        })
        client.authenticate('mylogin', 'password')
        folders = client.listFolder(
            parentId=user['_id'], parentFolderType='user', name='Public')
        publicFolder = folders[0]

        path = os.path.join(self.libTestDir, 'sub0', 'f')
        size = os.path.getsize(path)
        # NOTE(review): open(path) handle is never closed here — the original
        # relies on GC; left unchanged to keep behavior identical.
        file = client.uploadFile(publicFolder['_id'], open(path), name='test1',
                                 size=size, parentType='folder',
                                 reference='test1_reference')

        contents = 'you\'ve changed!'
        size = len(contents)
        stream = StringIO(contents)
        client.uploadFileContents(file['_id'], stream, size)

        # The server-computed sha512 must match the new contents exactly.
        file = self.model('file').load(file['_id'], force=True)
        sha = hashlib.sha512()
        sha.update(contents.encode('utf8'))
        self.assertEqual(file['sha512'], sha.hexdigest())
| |
"""Support for sending data to an Influx database."""
import logging
import re
import queue
import threading
import time
import math
import requests.exceptions
import voluptuous as vol
from homeassistant.const import (
CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_HOST, CONF_INCLUDE,
CONF_PASSWORD, CONF_PORT, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL,
EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_STOP, STATE_UNAVAILABLE,
STATE_UNKNOWN)
from homeassistant.helpers import state as state_helper, event as event_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_values import EntityValues
_LOGGER = logging.getLogger(__name__)

# Component-specific configuration keys.
CONF_DB_NAME = 'database'
CONF_TAGS = 'tags'
CONF_DEFAULT_MEASUREMENT = 'default_measurement'
CONF_OVERRIDE_MEASUREMENT = 'override_measurement'
CONF_TAGS_ATTRIBUTES = 'tags_attributes'
CONF_COMPONENT_CONFIG = 'component_config'
CONF_COMPONENT_CONFIG_GLOB = 'component_config_glob'
CONF_COMPONENT_CONFIG_DOMAIN = 'component_config_domain'
CONF_RETRY_COUNT = 'max_retries'

DEFAULT_DATABASE = 'home_assistant'
DEFAULT_VERIFY_SSL = True
DOMAIN = 'influxdb'

TIMEOUT = 5  # seconds allowed per HTTP request to InfluxDB
RETRY_DELAY = 20  # seconds between failed-write retries
QUEUE_BACKLOG_SECONDS = 30  # events older than this (plus retry time) are dropped
RETRY_INTERVAL = 60  # seconds
BATCH_TIMEOUT = 1  # seconds to wait for more events before flushing a batch
BATCH_BUFFER_SIZE = 100  # maximum events per write_points call

# Per-entity overrides: currently only the measurement name.
COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema({
    vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
})

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.All(vol.Schema({
        vol.Optional(CONF_HOST): cv.string,
        # vol.Inclusive: username and password must be supplied together.
        vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
        vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
        vol.Optional(CONF_EXCLUDE, default={}): vol.Schema({
            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
            vol.Optional(CONF_DOMAINS, default=[]):
                vol.All(cv.ensure_list, [cv.string])
        }),
        vol.Optional(CONF_INCLUDE, default={}): vol.Schema({
            vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
            vol.Optional(CONF_DOMAINS, default=[]):
                vol.All(cv.ensure_list, [cv.string])
        }),
        vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
        vol.Optional(CONF_PORT): cv.port,
        vol.Optional(CONF_SSL): cv.boolean,
        vol.Optional(CONF_RETRY_COUNT, default=0): cv.positive_int,
        vol.Optional(CONF_DEFAULT_MEASUREMENT): cv.string,
        vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
        vol.Optional(CONF_TAGS, default={}):
            vol.Schema({cv.string: cv.string}),
        vol.Optional(CONF_TAGS_ATTRIBUTES, default=[]):
            vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
        vol.Optional(CONF_COMPONENT_CONFIG, default={}):
            vol.Schema({cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY}),
        vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}):
            vol.Schema({cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}),
        vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}):
            vol.Schema({cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}),
    })),
}, extra=vol.ALLOW_EXTRA)

# Matches state strings whose payload is numeric with surrounding text
# (e.g. "on 42.5%"), so the number can be salvaged as a float field.
RE_DIGIT_TAIL = re.compile(r'^[^\.]*\d+\.?\d+[^\.]*$')
# Strips every character that is not a digit or a decimal point.
RE_DECIMAL = re.compile(r'[^\d.]+')
def setup(hass, config):
    """Set up the InfluxDB component.

    Validates connectivity, builds the state -> JSON converter closure and
    starts the background writer thread. Returns True even when the database
    is unreachable, scheduling another setup attempt instead of failing.
    """
    from influxdb import InfluxDBClient, exceptions

    conf = config[DOMAIN]

    kwargs = {
        'database': conf[CONF_DB_NAME],
        'verify_ssl': conf[CONF_VERIFY_SSL],
        'timeout': TIMEOUT
    }

    # Only pass connection settings the user actually configured so the
    # influxdb client library's own defaults apply otherwise.
    if CONF_HOST in conf:
        kwargs['host'] = conf[CONF_HOST]

    if CONF_PORT in conf:
        kwargs['port'] = conf[CONF_PORT]

    if CONF_USERNAME in conf:
        kwargs['username'] = conf[CONF_USERNAME]

    if CONF_PASSWORD in conf:
        kwargs['password'] = conf[CONF_PASSWORD]

    if CONF_SSL in conf:
        kwargs['ssl'] = conf[CONF_SSL]

    # Entity/domain include & exclude filters as fast-lookup sets.
    include = conf.get(CONF_INCLUDE, {})
    exclude = conf.get(CONF_EXCLUDE, {})
    whitelist_e = set(include.get(CONF_ENTITIES, []))
    whitelist_d = set(include.get(CONF_DOMAINS, []))
    blacklist_e = set(exclude.get(CONF_ENTITIES, []))
    blacklist_d = set(exclude.get(CONF_DOMAINS, []))
    tags = conf.get(CONF_TAGS)
    tags_attributes = conf.get(CONF_TAGS_ATTRIBUTES)
    default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT)
    override_measurement = conf.get(CONF_OVERRIDE_MEASUREMENT)
    component_config = EntityValues(
        conf[CONF_COMPONENT_CONFIG],
        conf[CONF_COMPONENT_CONFIG_DOMAIN],
        conf[CONF_COMPONENT_CONFIG_GLOB])
    max_tries = conf.get(CONF_RETRY_COUNT)

    try:
        influx = InfluxDBClient(**kwargs)
        # An empty write doubles as a cheap connectivity/credentials probe.
        influx.write_points([])
    except (exceptions.InfluxDBClientError,
            requests.exceptions.ConnectionError) as exc:
        _LOGGER.warning(
            "Database host is not accessible due to '%s', please "
            "check your entries in the configuration file (host, "
            "port, etc.) and verify that the database exists and is "
            "READ/WRITE. Retrying again in %s seconds.", exc, RETRY_INTERVAL
        )
        # Retry the whole setup later rather than failing the component.
        event_helper.call_later(
            hass, RETRY_INTERVAL, lambda _: setup(hass, config)
        )
        return True

    def event_to_json(event):
        """Add an event to the outgoing Influx list."""
        state = event.data.get('new_state')
        if state is None or state.state in (
                STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
                state.entity_id in blacklist_e or state.domain in blacklist_d:
            return

        try:
            # When an include filter exists, the entity or its domain must
            # match it to be recorded.
            if ((whitelist_e or whitelist_d)
                    and state.entity_id not in whitelist_e
                    and state.domain not in whitelist_d):
                return

            _include_state = _include_value = False

            _state_as_value = float(state.state)
            _include_value = True
        except ValueError:
            # Non-numeric state: try HA's state-to-number coercion; keep the
            # raw string either way.
            try:
                _state_as_value = float(state_helper.state_as_number(state))
                _include_state = _include_value = True
            except ValueError:
                _include_state = True

        # Measurement precedence: per-entity override > global override >
        # unit_of_measurement attribute > global default > entity_id.
        include_uom = True
        measurement = component_config.get(state.entity_id).get(
            CONF_OVERRIDE_MEASUREMENT)
        if measurement in (None, ''):
            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get('unit_of_measurement')
                if measurement in (None, ''):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id
                else:
                    # The unit became the measurement name; don't duplicate
                    # it as a field below.
                    include_uom = False

        json = {
            'measurement': measurement,
            'tags': {
                'domain': state.domain,
                'entity_id': state.object_id,
            },
            'time': event.time_fired,
            'fields': {}
        }
        if _include_state:
            json['fields']['state'] = state.state
        if _include_value:
            json['fields']['value'] = _state_as_value

        for key, value in state.attributes.items():
            if key in tags_attributes:
                json['tags'][key] = value
            elif key != 'unit_of_measurement' or include_uom:
                # If the key is already in fields
                if key in json['fields']:
                    key = key + "_"
                # Prevent column data errors in influxDB.
                # For each value we try to cast it as float
                # But if we can not do it we store the value
                # as string add "_str" postfix to the field key
                try:
                    json['fields'][key] = float(value)
                except (ValueError, TypeError):
                    new_key = "{}_str".format(key)
                    new_value = str(value)
                    json['fields'][new_key] = new_value

                    if RE_DIGIT_TAIL.match(new_value):
                        json['fields'][key] = float(
                            RE_DECIMAL.sub('', new_value))

            # Infinity and NaN are not valid floats in InfluxDB
            try:
                if not math.isfinite(json['fields'][key]):
                    del json['fields'][key]
            except (KeyError, TypeError):
                pass

        json['tags'].update(tags)

        return json

    instance = hass.data[DOMAIN] = InfluxThread(
        hass, influx, event_to_json, max_tries)
    instance.start()

    def shutdown(event):
        """Shut down the thread."""
        # None is the writer thread's shutdown sentinel.
        instance.queue.put(None)
        instance.join()
        influx.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)

    return True
class InfluxThread(threading.Thread):
    """A threaded event handler class.

    Consumes state-changed events from an internal queue, converts them via
    the supplied ``event_to_json`` callable and writes them to InfluxDB in
    batches, retrying failed writes up to ``max_tries`` times.
    """

    def __init__(self, hass, influx, event_to_json, max_tries):
        """Initialize the listener."""
        threading.Thread.__init__(self, name='InfluxDB')
        self.queue = queue.Queue()  # holds (monotonic timestamp, event) pairs
        self.influx = influx
        self.event_to_json = event_to_json
        self.max_tries = max_tries
        self.write_errors = 0
        self.shutdown = False
        hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)

    def _event_listener(self, event):
        """Listen for new messages on the bus and queue them for Influx."""
        item = (time.monotonic(), event)
        self.queue.put(item)

    @staticmethod
    def batch_timeout():
        """Return number of seconds to wait for more events."""
        return BATCH_TIMEOUT

    def get_events_json(self):
        """Return a batch of events formatted for writing."""
        # Events older than the backlog window plus worst-case retry time
        # are considered stale and dropped.
        queue_seconds = QUEUE_BACKLOG_SECONDS + self.max_tries*RETRY_DELAY

        count = 0
        json = []

        dropped = 0

        try:
            while len(json) < BATCH_BUFFER_SIZE and not self.shutdown:
                # Block indefinitely for the first event, then only briefly
                # so a partially-filled batch is still flushed promptly.
                timeout = None if count == 0 else self.batch_timeout()
                item = self.queue.get(timeout=timeout)
                count += 1

                if item is None:
                    # Shutdown sentinel posted by the component's shutdown().
                    self.shutdown = True
                else:
                    timestamp, event = item
                    age = time.monotonic() - timestamp

                    if age < queue_seconds:
                        event_json = self.event_to_json(event)
                        if event_json:
                            json.append(event_json)
                    else:
                        dropped += 1

        except queue.Empty:
            pass

        if dropped:
            _LOGGER.warning("Catching up, dropped %d old events", dropped)

        return count, json

    def write_to_influxdb(self, json):
        """Write preprocessed events to influxdb, with retry."""
        from influxdb import exceptions

        for retry in range(self.max_tries+1):
            try:
                self.influx.write_points(json)

                if self.write_errors:
                    _LOGGER.error("Resumed, lost %d events", self.write_errors)
                    self.write_errors = 0

                _LOGGER.debug("Wrote %d events", len(json))
                break
            except (exceptions.InfluxDBClientError, IOError):
                if retry < self.max_tries:
                    time.sleep(RETRY_DELAY)
                else:
                    if not self.write_errors:
                        # Log the full traceback only once per outage.
                        _LOGGER.exception("Write error")
                    self.write_errors += len(json)

    def run(self):
        """Process incoming events."""
        while not self.shutdown:
            count, json = self.get_events_json()
            if json:
                self.write_to_influxdb(json)
            # Mark every dequeued item done, including dropped/filtered ones,
            # so block_till_done() can unblock.
            for _ in range(count):
                self.queue.task_done()

    def block_till_done(self):
        """Block till all events processed."""
        self.queue.join()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union

    T = TypeVar('T')
    # Shape of the optional ``cls`` response-transform callback accepted by
    # every operation in this module (evaluated only during type checking).
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsOperations(object):
"""ExpressRoutePortsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Bind the shared pipeline client, config and (de)serializers.

    Instances are created by the service client; do not construct directly.
    """
    self._config = config
    self._client = client
    self._deserialize = deserializer
    self._serialize = serializer
def _delete_initial(
    self,
    resource_group_name,  # type: str
    express_route_port_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the raw DELETE request that backs :meth:`begin_delete`.

    Returns nothing on success; the LRO poller drives completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map auth/not-found/conflict statuses to typed exceptions; callers can
    # extend or override the mapping via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 202/204 are the async-accepted / no-content success codes for DELETE.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    express_route_port_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified ExpressRoutePort resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param express_route_port_name: The name of the ExpressRoutePort resource.
    :type express_route_port_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial DELETE. ``cls`` passes the raw
        # pipeline response through so the poller can inspect its headers.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            express_route_port_name=express_route_port_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # DELETE yields no body; only invoke the caller's transform, if any.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new LRO.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    express_route_port_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRoutePort"
    """Retrieves the requested ExpressRoutePort resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param express_route_port_name: The name of ExpressRoutePort.
    :type express_route_port_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRoutePort, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_12_01.models.ExpressRoutePort
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
    # Map auth/not-found/conflict statuses to typed exceptions; callers can
    # extend or override the mapping via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRoutePort', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    express_route_port_name,  # type: str
    parameters,  # type: "_models.ExpressRoutePort"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRoutePort"
    """Issue the raw PUT request that backs :meth:`begin_create_or_update`."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ExpressRoutePort')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 = updated an existing resource, 201 = created a new one; the body
    # deserializes the same either way.
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    express_route_port_name,  # type: str
    parameters,  # type: "_models.ExpressRoutePort"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ExpressRoutePort"]
    """Creates or updates the specified ExpressRoutePort resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param express_route_port_name: The name of the ExpressRoutePort resource.
    :type express_route_port_name: str
    :param parameters: Parameters supplied to the create ExpressRoutePort operation.
    :type parameters: ~azure.mgmt.network.v2018_12_01.models.ExpressRoutePort
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ExpressRoutePort]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial PUT. ``cls`` passes the raw
        # pipeline response through so the poller can inspect its headers.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            express_route_port_name=express_route_port_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response into the model (or the caller's
        # custom ``cls`` transform).
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new LRO.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
    def _update_tags_initial(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ExpressRoutePort"
        """Send the initial PATCH request of the update-tags long-running operation.

        Serializes ``parameters`` as a TagsObject JSON body, PATCHes the
        ExpressRoutePort resource URL and deserializes a 200 response into an
        ExpressRoutePort model; any other status raises HttpResponseError.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-12-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the body and run the PATCH through the client pipeline.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if cls:
            # Hand raw response + model to the caller-supplied hook, if any.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name, # type: str
        express_route_port_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.ExpressRoutePort"]
        """Update ExpressRoutePort tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :param parameters: Parameters supplied to update ExpressRoutePort resource tags.
        :type parameters: ~azure.mgmt.network.v2018_12_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ExpressRoutePort or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ExpressRoutePort]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePort"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PATCH.  The identity lambda keeps
            # the raw pipeline response so the poller can drive the LRO itself.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not be forwarded to the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the model type.
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRoutePortListResult"]
        """List all the ExpressRoutePort resources in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.ExpressRoutePortListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-12-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET: first page hits the operation URL with api-version;
            # subsequent pages follow the service-supplied next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Return (link to the next page or None, iterator over this page).
            deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, mapping 4xx conflicts to typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.ExpressRoutePortListResult"]
        """List all the ExpressRoutePort resources in the specified subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.ExpressRoutePortListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-12-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET: first page hits the operation URL with api-version;
            # subsequent pages follow the service-supplied next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Return (link to the next page or None, iterator over this page).
            deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, mapping 4xx conflicts to typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
| |
# Python 3.3 or higher
import shelve
import os
import datetime
import time
import uuid
import glob
import sys
import random
import shutil
#if sys.version_info.minor == 3:
# from contextlib import closing
# shelve_open = lambda file, flag="c", protocol=None, writeback=False: closing(shelve.open(file, flag))
#else:
# shelve_open = shelve.open
from multiprocessing import Pipe
from zashel.utils import threadize
from zrest.basedatamodel import *
from zrest.exceptions import *
from math import ceil
from .filelock import FileLock, Timeout
from contextlib import contextmanager
import json
@contextmanager
def shelve_open(pathname, flag="c", protocol=None, writeback=False, timeout=5, poll_interval=None,
                lockes=dict()): #It's an easy way to save it on memory
    """Open a shelve file under an exclusive inter-process FileLock.

    The mutable default ``lockes`` is intentional: it acts as a process-wide
    cache of one FileLock per pathname, so repeated opens reuse the same lock.

    :param pathname: path of the shelf (created empty if it does not exist)
    :param flag: shelve open flag ("c", "r", "w", "n")
    :param timeout: seconds to wait for the lock, or None for no limit
    :param poll_interval: seconds between lock acquisition attempts
    :yields: the opened shelve.Shelf
    """
    if os.path.exists(pathname) is False:
        # Create the shelf files up-front so there is something to lock/open.
        shelf = shelve.open(pathname, "c")
        shelf.close()
    if pathname not in lockes:
        lockes[pathname] = FileLock(pathname)
    lock = lockes[pathname]
    kwargs = dict()
    if timeout is not None:
        kwargs["timeout"] = timeout
    if poll_interval is not None:
        kwargs["poll_interval"] = poll_interval
    lock.acquire(**kwargs)
    shelf = None
    try:
        shelf = shelve.open(pathname, flag, protocol, writeback)
        yield shelf
    except Timeout:
        pass #TODO review if it works
    finally:
        # Bug fix: if shelve.open() itself raised, the original referenced an
        # unbound "shelf" here (NameError) and never released the lock.
        if shelf is not None:
            shelf.close()
        lock.release()
class ShelveModel(RestfulBaseInterface):
"""
ShelveModel with a double interface:
An inner interface with new, edit, replace, drop and fetch whose take dictionaries.
A Restful interface with post, patch, put, delete and get whose take json data.
To use with zrest.
"""
    def __init__(self, filepath, groups=10, *, index_fields=None,
                 headers=None,
                 name=None,
                 items_per_page=50,
                 unique=None,
                 unique_is_id=False,
                 split_unique=0,
                 to_block=True,
                 light_index=True):
        """
        Initializes ShelveModel, creating the meta/index/data shelf files on
        first use and starting the single writer thread.

        :param filepath: path (directory) to save the database files in;
            created if missing
        :param groups: number of data shelves the registries are sharded over
        :param index_fields: fields indexed. Not indexed fields do not accept
            queries
        :param headers: headers of table. None by default. If None,
            dictionaries are stored as-is
        :param name: name of the model
        :param items_per_page: amount of items each page (None falls back to 50)
        :param unique: unique field (one by the moment), or a string naming it
        :param unique_is_id: when True the unique field doubles as the registry id
        :param split_unique: number of characters of each piece in which unique
            is splitted (shelf-index mode only)
        :param to_block: when True, writer operations confirm by fetching back
        :param light_index: when True, indexes are kept as directory trees on
            disk instead of shelf files
        """
        try:
            assert os.path.exists(filepath)
        except AssertionError:
            os.makedirs(filepath)
        if items_per_page is None:
            items_per_page = 50
        self.light_index = light_index
        # uuid identifies this model instance (e.g. across processes)
        self.uuid = str(uuid.uuid4())
        self._filepath = filepath
        self._alive = False
        self._opened = True
        # one-way pipe feeding the single writer thread (see _writer)
        self._pipe_in, self._pipe_out = Pipe(False)
        self._close = False
        self._headers = headers
        self._headers_checked = False
        # NOTE(review): _unique is a plain str when "unique" is given as str,
        # but a one-element list otherwise — downstream code branches on both.
        if isinstance(unique, str):
            self._unique = unique
        else:
            self._unique = [unique]
        self._split_unique = split_unique
        self._unique_is_id = unique_is_id
        self._name = name
        self.items_per_page = items_per_page
        self._to_block = to_block
        if index_fields is None:
            self._index_fields = list()
        else:
            assert isinstance(index_fields, list)
            self._index_fields = index_fields
        # Create the meta shelf on first run only.
        try:
            assert any([os.path.exists(file)
                        for file in glob.glob("{}.*".format(self._meta_path))]+[False])
        except AssertionError:
            with shelve_open(self._meta_path) as shelf:
                shelf["filepath"] = self._meta_path
                shelf["total"] = int()
                shelf["next"] = 1
                shelf["groups"] = groups
                shelf["class"] = self.__class__.__name__
                shelf["name"] = self._name
                shelf["ids"] = list()
        # Shelf-based indexes are only materialized when light_index is off.
        if self.light_index is False:
            for index in self.index_fields:
                if (self._unique_is_id is True and self._unique != index) or self._unique_is_id is False:
                    with shelve_open(self._index_path(index)) as shelf:
                        shelf["filepath"] = self._index_path(index)
        for group in range(0, groups):
            with shelve_open(self._data_path(group)) as shelf:
                shelf["filepath"] = self._data_path(group)
        # Composite (or str, see NOTE above) uniques live in a "_unique" index.
        if len(self._unique) > 1:
            self._index_fields.append("_unique")
            with shelve_open(self._index_path("_unique")) as shelf:
                shelf["filepath"] = self._index_path("_unique")
        self.writer = self._writer()
        with shelve_open(self._meta_path, "r") as shelf:
            self._groups = shelf["groups"]
        time.sleep(0.05)
        self._as_foreign = list()
        self._as_child = list()
def __len__(self):
final = None
while True:
try:
with shelve_open(self._meta_path, "r") as meta:
final = meta["total"]
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print("__len__: ", e)
continue
else:
if final is not None:
break
return final
def __next__(self): #This is not very appropiate, but...
final = None
while True:
try:
with shelve_open(self._meta_path, "r") as meta:
final = meta["next"]
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
print("__next__: ", e)
continue
else:
if final is not None:
break
return final
@property
def name(self):
if self._name == None:
with shelve_open(self._meta_path, "r") as shelf:
self._name = shelf["name"]
return self._name
@name.setter
def name(self, value):
with shelve_open(self._meta_path) as shelf:
shelf["name"] = value
self._name = value
    @property
    def index_fields(self):
        # Fields that accept queries; returned by reference, not copied.
        return self._index_fields
    @property
    def indexes_files(self):
        # Paths of the per-field index shelves/directories.
        return [self._index_path(index) for index in self.index_fields]
    @property
    def groups(self):
        # Number of data shards, as persisted in the meta shelf at init.
        return self._groups
    @property
    def data_files(self):
        # Paths of all data shelves (one per shard).
        return [self._data_path(index) for index in range(0, self.groups)]
    @property
    def filepath(self):
        # Base directory holding every shelf of this model.
        return self._filepath
    @property
    def headers(self):
        # Lazily load headers from data shelf 0 the first time; the
        # _headers_checked flag avoids re-opening the shelf on every access.
        if self._headers is None and self._headers_checked is False:
            try:
                with shelve_open(self._data_path(0), "r") as shelf:
                    self._headers = shelf["headers"]
            except KeyError:
                self._headers = None
            finally:
                self._headers_checked = True
        return self._headers
    @property
    def _meta_path(self):
        # Path of the meta shelf (totals, next id, ids, name...).
        return os.path.join(self.filepath, "meta")
    @property
    def unique(self):
        # Name of the index holding the unique constraint.
        # NOTE(review): when unique was given as a *string* at init, _unique is
        # that string, so len() is its character count — any name longer than
        # one char lands in the "_unique" branch.  Confirm this is intended.
        if len(self._unique) > 1:
            return "_unique"
        elif len(self._unique) == 1:
            return self._unique[0]
        else:
            return None
    def _index_path(self, field):
        # Path of the index shelf/directory for *field*.
        return os.path.join(self.filepath, "index_{}".format(field))
    def _data_path(self, group):
        # Path of the data shelf for shard *group*.
        return os.path.join(self.filepath, "data_{}".format(str(group)))
    def _send_pipe(self, **kwargs):
        # Enqueue a command dict for the writer thread.
        self._pipe_out.send(kwargs)
def get_unique_hash(self, data):
final = str()
if len(self._unique) > 1 and type(self._unique) != str:
for item in self._unique:
if type(data[item]) in (datetime.datetime, datetime.time, datetime.timedelta):
final += data[item].strftime("%Y%m%d")
else:
final += str(data[item])
elif len(self._unique) == 0:
return data[self._unique[0]]
elif type(self._unique) == str:
return data[self._unique]
def fetch(self, filter, **kwargs):
"""
Gives the result of a query.
:param filter: dictionary with wanted coincidences
:returns: dictionary with result of the query
"""
conn_in, conn_out = Pipe(False)
self._send_pipe(action="fetch", filter=filter, data={}, pipe=conn_out)
return conn_in.recv()
def _fetch(self, registries, shelf):
if isinstance(registries, int):
registries = {registries}
final = list()
with shelve_open(shelf, "r") as file:
for item in registries:
try:
data = file[str(item)]
except KeyError:
data = None
if isinstance(data, list) and self.headers is not None:
if data is not None and len(data)==len(self.headers):
data = dict(zip(self.headers, data))
else:
data = None
if isinstance(data, dict):
data.update({"_id": item})
if data is not None:
final.append(data)
return final
def _is_unique(self, data):
if self.light_index is True:
index_path = None
if self._unique is not None:
index_path = os.path.join(self._index_path(self.unique), str(self.get_unique_hash(data)))
if index_path is not None:
try:
lsdir = os.listdir(index_path)
except FileNotFoundError:
return False
if len(lsdir) > 0:
return True
else:
return False
else:
return False
else:
field = None
data_field = None
with shelve_open(self._index_path(self.unique)) as shelf:
if data_field in shelf and shelf[self.get_unique_hash(data)] != set():
return True
else:
return False
    def _set_index(self, data, registry):
        """Register *registry* under every indexed value of *data*.

        Light-index mode materializes index entries as directories
        index_<field>/<value>/<registry>; shelf mode stores sets of registry
        ids keyed by stringified value.
        """
        # Rows stored as header-ordered lists are rebuilt into dicts first.
        if isinstance(data, list) and self.headers is not None and len(data) == len(self.headers):
            data = dict(zip(self.headers, data))
        if self.light_index is True:
            for field in data:
                if field in self.index_fields:
                    index_path = os.path.join(self._index_path(field), str(data[field]), str(registry))
                    os.makedirs(index_path, exist_ok=True)
            # Composite uniques get an extra entry under the "_unique" index.
            if len(self._unique) > 1:
                index_path = os.path.join(self._index_path("_unique"), str(self.get_unique_hash(data)), str(registry))
                os.makedirs(index_path, exist_ok=True)
        else:
            for field in data:
                if (any([os.path.exists(file)
                         for file in glob.glob("{}.*".format(self._index_path(field)))]+[False])):
                    with shelve_open(self._index_path(field)) as shelf:
                        index = str(data[field])
                        last = shelf
                        if not index in shelf:
                            if field != self._unique or self._split_unique == 0:
                                shelf[str(index)] = set()
                            elif self._unique_is_id is False:
                                # NOTE(review): this split-unique trie path looks broken:
                                # ceil(len(str(index))/self._unique) divides by _unique
                                # (a list/str, not a number) and would raise TypeError;
                                # probably self._split_unique was meant.
                                offset = len(str(index))%self._split_unique
                                if offset:
                                    inter = str(index)[0:offset]
                                    if inter not in last:
                                        last[inter] = dict()
                                    last = last[inter]
                                for x in range(ceil(len(str(index))/self._unique)):
                                    inter = str(index)[offset+x*self._split_unique:offset+(x+1)*self._split_unique]
                                    if inter not in last:
                                        last[inter] = dict()
                                    last = last[inter]
                                # NOTE(review): rebinding the local "last" writes nothing
                                # back to the shelf — the trie leaf is never persisted.
                                last = registry
                        if field != self._unique:
                            shelf[str(index)] |= {registry}
                        else:
                            shelf[str(index)] = {registry}
            if len(self._unique) > 1:
                with shelve_open(self._index_path("_unique")) as shelf:
                    shelf[self.get_unique_hash(data)] = {registry}
    def _del_index(self, data, registry):
        """Remove *registry* from every index entry that points at *data*."""
        if self.light_index is True:
            try:
                # Removes the per-field directory entry for every field in data
                # (fields that were never indexed simply have no directory).
                for field in data:
                    index_path = os.path.join(self._index_path(field), str(data[field]), str(registry))
                    if os.path.exists(index_path) is True:
                        shutil.rmtree(index_path, ignore_errors=True)
                if len(self._unique) > 1:
                    index_path = os.path.join(self._index_path("_unique"), str(self.get_unique_hash(data)))
                    if os.path.exists(index_path) is True:
                        shutil.rmtree(index_path, ignore_errors=True)
            except TypeError:
                # NOTE(review): reached when data is not a dict (e.g. a list from
                # _fetch) — the error is only printed, not handled.
                print(data, registry)
        else:
            for field in data:
                if (any([os.path.exists(file)
                         for file in glob.glob("{}.*".format(self._index_path(field)))]+[False])):
                    with shelve_open(self._index_path(field)) as shelf:
                        index = str(data[field])
                        if index in shelf:
                            shelf[index] -= {registry}
            # NOTE(review): this block reuses "field"/"data[field]" left over from
            # the loop above; it probably should target the "_unique" index and
            # self.get_unique_hash(data) instead — confirm before relying on it.
            if len(self._unique) > 1:
                with shelve_open(self._index_path(field)) as shelf:
                    index = str(data[field])
                    if index in shelf:
                        del(shelf[index])
def _check_child(self, data):
if self._as_child:
for foreign_key in self._as_child:
if not foreign_key.field in data:
return 1
else:
foreign = foreign_key.foreign.fetch({"_id": data[foreign_key.field]}) # Change to header
if not foreign:
return 2
return 0
def insert(self, data, **kwargs):
"""
Loads new given data in the database
Blocks until finnish
:param data: list with a dictionary for each item to upload
:returns: New Data
"""
if self.unique is None:
conn_in, conn_out = Pipe(False)
self._send_pipe(action="insert", data=data, pipe=conn_out)
recv = conn_in.recv()
return recv
else:
return {"Error": 501}
def _insert(self, data, filename_reg):
for filename in filename_reg:
with shelve_open(filename) as shelf:
for index in filename_reg[filename]:
new_data = data[str(index)]
if self.headers is not None:
new_data = list()
for header in self.headers:
try:
new_data.append(data[str(index)][header])
except KeyError:
new_data.append("")
for index_name in self.index_fields:
index_dict = dict()
for index in data:
if str(data[index][index_name]) not in index_dict:
index_dict[str(data[index][index_name])] = set()
index_dict[str(data[index][index_name])].add(int(index))
if self.light_index is True:
for index in index_dict:
for item in index_dict[index]:
item = str(item)
try:
os.makedirs(os.path.join(self._index_path(index_name), index, item))
except PermissionError:
pass
else:
with shelve_open(self._index_path(index_name)) as shelf:
for index in index_dict:
if index not in shelf:
shelf[index] = index_dict[index]
else:
shelf[index] |= index_dict[index]
with shelve_open(self._meta_path) as shelf:
total, next_ = len(self), next(self)
shelf["total"] = total + len(data)
shelf["next"] = next_ + len(data)
shelf["ids"] = set([int(key) for key in list(data.keys())])
def new(self, data, **kwargs): #TODO: Errors setting new data
"""
Set new given data in the database
Blocks until finnish
:param data: dictionary with given data. Saved as is if self.headers is None
:returns: New Data
"""
if self._check_child(data) != 0:
return None
conn_in, conn_out = Pipe(False)
test = None
if self._is_unique(data) is True:
self._send_pipe(action="replace", data=data, filter={self.unique: self.get_unique_hash(data)}, pipe=conn_out)
else:
self._send_pipe(action="new", data=data, pipe=conn_out)
recv = conn_in.recv()
return recv
    def _new(self, data, registry, shelf):
        """Writer-side create: store one row and bump the meta counters.

        :param data: row dict (projected onto self.headers when set)
        :param registry: registry id to store the row under
        :param shelf: path of the data shelf that owns this registry
        """
        with shelve_open(shelf) as file:
            if self.headers is not None:
                # project the dict onto the declared headers, "" for missing
                new_data = list()
                for header in self.headers:
                    try:
                        new_data.append(data[header])
                    except KeyError:
                        new_data.append("")
                data = new_data
            file[str(registry)] = data
        with shelve_open(self._meta_path) as file:
            # NOTE(review): total/next are read outside this lock scope, so two
            # concurrent writers could clobber each other (author marked it too).
            total, next_ = len(self), next(self) #Bug!
            file["total"] = total + 1
            file["next"] = next_ + 1
            ids = list(file["ids"])
            ids.append(str(registry))
            file["ids"] = ids
        self._set_index(data, registry)
def replace(self, filter, data, **kwargs):
"""
Replaces all data which coincides with given filter with given data
Blocks untill finnish
:param filter: dictionary with coincidences
:param data: dictionary with new data. It can be partial.
:returns: Data replaced
"""
if self._check_child(data) != 0:
return None
conn_in, conn_out = Pipe(False)
test = None
if ((self.unique in data and self.unique not in filter) or
(self.unique in data and self.unique in filter and data[self.unique]!=filter[self.unique])):
test = self.fetch({self.unique: data[self.unique]})
if not test:
self._send_pipe(action="replace", filter=filter, data=data, pipe=conn_out)
else:
return {"Error": "400"}
return conn_in.recv()
    def _replace(self, data, registries, shelf):
        """Writer-side replace: merge *data* over each registry and re-index.

        :param data: partial or complete row dict
        :param registries: iterable of registry ids to update
        :param shelf: path of the data shelf holding those registries
        """
        with shelve_open(shelf) as file:
            for reg in registries:
                try:
                    old_data = self._fetch({reg}, shelf)[0]
                except IndexError:
                    # registry vanished or row malformed: skip it
                    continue
                else:
                    if isinstance(old_data, dict): # Verified twice. It has to be a dict
                        if "_id" in old_data:
                            del(old_data["_id"])
                        # merge: keep old values, overwrite with the new ones
                        new_data = old_data.copy()
                        new_data.update(data)
                        self._del_index(old_data, reg)
                        if self.headers is not None:
                            new_data = [new_data[item] for item in self.headers]
                        file[str(reg)] = new_data
                        self._set_index(new_data, reg)
def edit(self, filter, data, **kwargs):
"""
replace alias
"""
if self._check_child(data) == 2:
return None
conn_in, conn_out = Pipe(False)
test = None
if ((self.unique in data and self.unique not in filter) or
(self.unique in data and self.unique in filter and data[self.unique] != filter[self.unique])):
test = self.fetch({self.unique: data[self.unique]}) #TODO Better
if not test:
self._send_pipe(action="edit", filter=filter, data=data, pipe=conn_out)
else:
return {"Error": "400"}
return conn_in.recv()
def _edit(self, data, registries, shelf):
self._replace(data, registries, shelf)
def drop(self, filter, **kwargs):
"""
Deletes data from database which coincides with given filter
Blocks untill finnish
:param filter: dictionary with given filter
:returns: Data
"""
conn_in, conn_out = Pipe(False)
self._send_pipe(action="drop", filter=filter, data={}, pipe=conn_out)
return conn_in.recv()
    def _drop(self, data, registries, shelf):
        """Writer-side delete: un-index and remove each registry, fix meta."""
        for reg in registries:
            try:
                old_data = self._fetch({reg}, shelf)
            except KeyError:
                continue
            else:
                if self._as_foreign:
                    for item in self._as_foreign:
                        children = item.children.fetch({item.field: reg})
                        # NOTE(review): "if item" is always truthy (item is the
                        # foreign-key object); "if children" was probably meant,
                        # and this continue only skips the inner loop anyway —
                        # referenced parents are NOT actually protected here.
                        if item:
                            continue
                if old_data != list():
                    # NOTE(review): old_data is the *list* returned by _fetch,
                    # but _del_index expects a single row dict — confirm.
                    self._del_index(old_data, reg)
                    with shelve_open(shelf) as file:
                        del(file[str(reg)])
                    with shelve_open(self._meta_path) as file:
                        file["total"] -= 1
                        ids = list(file["ids"])
                        del(ids[ids.index(str(reg))])
                        file["ids"] = ids
    def _filter(self, filter):
        """Resolve *filter* to an ordered, paginated list of registry ids.

        Recognizes the pseudo-fields "page", "items_per_page", "fields" and
        "order" in *filter*; every other key is intersected against its index.

        :returns: dict with keys "filter" (page of ids), "total", "page",
                  "items_per_page" and "fields"
        """
        # Retry reading the full id set until the meta shelf is available.
        while True:
            try:
                with shelve_open(self._meta_path) as shelf:
                    ids = list(shelf["ids"])
                    final_set = set([int(id) for id in ids])
            except (KeyError, PermissionError) as e:
                print("_filter: ", e)
                # randomized backoff to break lock contention between writers
                time.sleep(random.randint(0, 2)+random.randint(0, 1000)/1000)
                continue
            else:
                break
        #final_set = set(range(0, next(self)))
        order = str()
        fields = list()
        page = 1
        items_per_page = self.items_per_page
        # When the unique field doubles as the id, query it as "_id".
        if self._unique_is_id and self.unique in filter:
            filter["_id"] = filter[self.unique]
            del(filter[self.unique])
        if "order" in filter:
            order = filter["order"]
            order = order.split(",")
        if "page" in filter:
            page = filter["page"]
        if "items_per_page" in filter:
            items_per_page = filter["items_per_page"]
        if "fields" in filter:
            fields = filter["fields"].split(",")
        sub_order = dict()
        final_order = list()
        # Intersect the candidate set with each queried field's index.
        # NOTE(review): "order" is NOT excluded below, so an "order" key is also
        # treated as a data field and will usually empty the result — confirm.
        for field in filter:
            if field not in ("page", "items_per_page", "fields"):
                subfilter = set()
                if field == "_id" and filter[field] != "":
                    subfilter = {int(filter["_id"])}
                elif field == "_id" and filter[field] == "":
                    subfilter = final_set
                else:
                    if self.light_index is True:
                        # directory names under index_<field>/<value>/ are the ids
                        if os.path.exists(os.path.join(self._index_path(field), str(filter[field]))) is True:
                            subfilter = os.listdir(os.path.join(self._index_path(field), str(filter[field])))
                            subfilter = set([int(sub) for sub in subfilter])
                    else:
                        if any([os.path.exists(file)
                                for file in glob.glob("{}.*".format(self._index_path(field)))]+[False]):
                            with shelve_open(self._index_path(field), "r") as index:
                                if self.unique != field or self._split_unique == 0:
                                    if str(filter[field]) in index:
                                        subfilter = index[str(filter[field])]
                                else: #This is Shit!
                                    # NOTE(review): this trie walk keys off str(index)
                                    # (the Shelf object's repr!) rather than the
                                    # filter value — it cannot work as written.
                                    offset = len(str(index))%self._split_unique
                                    last = index
                                    if offset:
                                        inter = str(index)[0:offset]
                                        last = last[inter]
                                    for x in range(ceil(len(str(index))/self._split_unique)):
                                        inter = str(index)[offset+x*self._split_unique:offset+(x+1)*self._split_unique]
                                        last = last[inter]
                                    subfilter = {last}
                final_set &= subfilter
        final_set = list(final_set)
        final_set.sort()
        if len(order) > 0:
            for _id in final_set:
                if self.light_index is True:
                    # ordering by index value is not implemented in light mode
                    final_order = final_set
                    #TODO
                else:
                    field = order[0] #TODO: Accept many fields
                    if field.startswith("-"):
                        sfield = field[1:]
                    else:
                        sfield = field
                    if any([os.path.exists(file)
                            for file in glob.glob("{}.*".format(self._index_path(sfield)))] + [False]):
                        with shelve_open(self._index_path(sfield), "r") as index:
                            sub_order[sfield] = index.copy()
                    keys = list(sub_order.keys())
                    if field.startswith("-"):
                        keys.reverse()
                    else:
                        keys.sort()
                    for indexes in keys:
                        for key in indexes:
                            if key in final_set:
                                final_order.append(key)
        else:
            final_order = final_set
        # Slice the requested page out of the ordered ids.
        return {"filter": final_order[int(items_per_page)*(int(page)-1):int(items_per_page)*int(page)],
                "total": len(final_order),
                "page": int(page),
                "items_per_page": int(items_per_page),
                "fields": fields}
def _get_datafile(self, filter):
assert isinstance(filter, list)
filename_reg = dict()
for reg in filter:
filename = self._data_path(reg % self.groups)
if filename not in filename_reg:
filename_reg[filename] = set()
filename_reg[filename] |= {reg}
return filename_reg
def get_count(self, filter, **kwargs):
filter = self._filter(filter)
return({"count": filter["total"]})
def direct_fetch(self, filter, filtered=None, **kwargs):
print("Filter Direct_Fetch: ", filter)
final = list()
filtered = self._filter(filter)
filter = filtered["filter"]
filter = self._get_datafile(filter)
if filtered is None:
filtered = self._filter(filter)
for filename in filter:
final.extend(self._fetch(filter[filename], filename))#Filter
new_final = list()
for _id in filtered["filter"]:
for index, item in enumerate(final):
if "_id" in item and item["_id"] == _id:
if filtered["fields"]:
new = dict()
for field in item:
if field == "_id" or field in filtered["fields"]:
new[field] = item[field]
new_final.append(new)
break
else:
new_final.append(item)
break
if new_final == list():
return {"Error": 404}
else:
return ({"data": new_final,
"total": filtered["total"],
"page": filtered["page"],
"items_per_page": filtered["items_per_page"]})
    @threadize
    def _writer(self):
        """
        Writer daemon: serializes every mutation of the shelve files.

        It may receive by self._pipe_out a dictionary with:
        action: new, replace, drop or edit
        filter: if not new, a set of registries
        data: dictionary with the new data

        Loops until the feeding pipe hits EOF, then sets self._close so
        close() can finish. Replies are sent back through data["pipe"].
        """
        new = int()  # NOTE(review): never read afterwards; looks vestigial
        while True:
            try:
                data = self._pipe_in.recv()
            except EOFError:
                # Producer side closed the pipe: shut this thread down.
                self._close = True
                break
            else:
                send = 0
                # Resolve which data files hold the affected registries.
                if "filter" in data and data["action"] not in ("new", "fetch"):
                    filter = data["filter"]
                    filtered = self._filter(filter)
                    filter = filtered["filter"]
                    filename_reg = self._get_datafile(filter)
                else:
                    if self._unique_is_id and self.unique in data["data"]:
                        # The unique field doubles as the id: map it directly.
                        filename_reg = data["data"][self.unique]
                        filename_reg = {self._data_path(filename_reg%self.groups): filename_reg}
                        del(data[self.unique])
                    elif isinstance(data["data"], list) and data["action"] == "insert":
                        # Bulk insert: pre-assign consecutive ids and bucket
                        # them into their data files.
                        total = next(self)
                        total_reg = len(data["data"])
                        filename_reg = dict()
                        for index, x in enumerate(range(total, total+total_reg)):
                            data_path = self._data_path(x % self.groups)
                            if data_path not in filename_reg:
                                filename_reg[data_path] = set()
                            filename_reg[data_path].add(x)
                            if not "dict_data" in data:
                                data["dict_data"] = dict()
                            data["dict_data"][str(x)] = data["data"][index]
                        data["data"] = dict(data["dict_data"])
                        del(data["dict_data"])
                    else:
                        # Single "new": take the next free id.
                        total = next(self)
                        filename_reg = {self._data_path(total % self.groups): total}
                for filename in filename_reg:
                    if data["action"] != "insert":
                        # Dispatch to self._new/_replace/_drop/_edit by name.
                        while True:
                            try:
                                if data["action"] != "fetch":
                                    self.__getattribute__("_{}".format(data["action"]))(data["data"],
                                                                                        filename_reg[filename],
                                                                                        filename)
                            except (KeyboardInterrupt, SystemExit):
                                raise
                            except Exception as e:
                                print(e)
                                raise
                                # NOTE(review): unreachable after raise — the
                                # retry-on-busy loop below never runs.
                                time.sleep(0.1)
                                continue
                            else:
                                break
                if data["action"] == "insert":
                    self._insert(data["data"], filename_reg)
                if self._to_block is True:
                    # Build the reply the blocked caller is waiting for.
                    if data["action"] != "insert":
                        if data["action"] == "new":
                            s_filter = {"_id": total}
                        else:
                            s_filter = data["filter"]
                    if data["action"] in ("new", "drop", "edit", "replace", "insert", "fetch"):
                        if data["action"] == "insert":
                            send = None
                        elif data["action"] == "fetch":
                            send = self.direct_fetch(s_filter)
                        else:
                            try:
                                fetched = self.direct_fetch(s_filter)
                                send = fetched
                                """After an edit or a replace filter may change...
                                Is it a bug?"""
                            except KeyError:
                                send = None
                    if send is None:
                        if data["action"] in ("new", "insert"):
                            # Nothing fetchable yet: reply with an empty page.
                            filtered = {"total": 1,
                                        "page": 1,
                                        "items_per_page": self.items_per_page}
                            send = {"data": [],
                                    "total": filtered["total"],
                                    "page": filtered["page"],
                                    "items_per_page": filtered["items_per_page"]}
                        else:
                            send = None
                            self._alive = False
                    data["pipe"].send(send)
        #TODO: send error
def close(self):
"""
Waits until all interactions are finnished
It's called before detroying the instance
"""
self._pipe_out.close()
while self._close is False:
time.sleep(0.5)
self.writer.join()
def _set_as_foreign(self, foreign_key):
self._as_foreign.append(foreign_key)
def _set_as_child(self, foreign_key):
self._as_child.append(foreign_key)
class ShelveForeign(RestfulBaseInterface):
    """
    Foreign Key for ShelveModel. Too Cute to Be.

    Wires a "foreign" ShelveModel (e.g. customers) to a "child" ShelveModel
    (e.g. invoices) through *child_field*, the child field holding the
    foreign registry's _id.
    """
    def __init__(self, foreign_model, child_model, child_field, alias="_id", items_per_page=50):
        """
        Instantiates ShelveForeign
        :param foreign_model: ShelveModel relationed with child.
        IE: Customer with Invoices
        :param child_model: ShelveModel with a field linked to foreign_model
        IE: Invoices of Customers
        :param child_field: Field from child_model linked to _id in foreign_model.
        It may exist in advance
        :param alias: Existing field in foreign_model returned as data of child_field
        _id by default
        :param items_per_page: page size used by this relation
        """
        assert isinstance(foreign_model, ShelveModel)
        assert isinstance(child_model, ShelveModel)
        assert isinstance(child_field, str)
        assert isinstance(alias, str)
        RestfulBaseInterface.__init__(self)
        self._foreign_model = foreign_model # foreign
        self._child_model = child_model # child
        self._child_field = child_field # field
        self._alias = alias #alias
        # Register this relation on both ends so each model knows its role.
        self.foreign._set_as_foreign(self)
        self.child._set_as_child(self)
        self.items_per_page = items_per_page
    @property
    def foreign(self):
        # The model holding the referenced registries.
        return self._foreign_model
    @property
    def child(self):
        # The model whose registries point at the foreign one.
        return self._child_model
    @property
    def field(self):
        # Child field that stores the foreign _id.
        return self._child_field
    @property
    def alias(self):
        # Foreign field exposed in place of the raw _id.
        return self._alias
    def _filter(self, filter):
        """Split a combined filter into per-model filters.

        Keys prefixed "<foreign name>_" / "<child name>_" are routed to the
        corresponding model (prefix stripped); empty values are dropped.
        """
        foreign_filter = dict()
        child_filter = dict()
        foreign_name = self.foreign.name+"_"
        child_name = self.child.name+"_"
        if type(filter) == str:
            filter = json.loads(filter)
        for field in filter:
            if field.startswith(foreign_name) is True and filter[field] != "":
                foreign_filter[field[len(foreign_name):]] = filter[field]
            elif field.startswith(child_name) is True and filter[field] != "":
                child_filter[field[len(child_name):]] = filter[field]
        if self.field in child_filter:
            # Filtering children by the link field means selecting that
            # foreign registry by _id.
            foreign_filter["_id"] = child_filter[self.field]
        return {"foreign": foreign_filter,
                "child": child_filter}
    def _unfilter_child(self, filter):
        """Re-prefix a child filter with "<child name>_" (inverse of _filter)."""
        final = dict()
        for key in filter:
            final["{}_{}".format(self.child.name, key)] = filter[key]
        return final
    def fetch(self, filter, **kwargs):
        """
        Fetches everything related
        :param filter: Filter to apply
        :param kwargs: Doesn't apply
        :return: Data Filtered (foreign page with each item's children
        embedded under item["_embedded"][child name])
        """
        filter = self._filter(filter)
        foreign_data = self.foreign.direct_fetch(filter["foreign"])
        if type(foreign_data) == str:
            foreign_data = json.loads(foreign_data)
        if "data" in foreign_data:
            f_data = foreign_data["data"]
        elif type(foreign_data) == list:
            f_data = [foreign_data]
        else:
            f_data = foreign_data
        for item in f_data:
            if "_id" in item:
                # Fetch the children linked to this foreign registry and
                # embed them HATEOAS-style.
                child_filter = filter["child"].copy()
                child_filter.update({self.field: item["_id"]})
                child_data = self.child.direct_fetch(child_filter)
                if "_embedded" not in item:
                    item["_embedded"] = dict()
                item["_embedded"].update({self.child.name: child_data})
        return foreign_data
    def new(self, data, *, filter, **kwargs): #Redo
        """
        Creates new child associated to a single foreign
        :param data: New data
        :param filter: Filter to apply to foreign, usually id
        :param kwargs: Doesn't apply
        :return: foreign data with all children asociated
        """
        s_filter = self._filter(filter)
        foreign_data = self.foreign.direct_fetch(s_filter["foreign"])
        if type(foreign_data) == str:
            foreign_data = json.loads(foreign_data)
        if "data" in foreign_data:
            f_data = foreign_data["data"][0]
        else:
            f_data = foreign_data
        if "_id" in f_data:
            # Link the new child to the (single) matched foreign registry.
            data.update({self._child_field: f_data["_id"]})
        if self._child_field in data:
            foreign_data[self.child.name] = self.child.new(data)
        # NOTE(review): passes the raw *filter* (not s_filter) back through
        # fetch, which re-splits it — verify this round-trip is intended.
        return self.fetch(filter)
    def drop(self, filter, **kwargs):
        """
        Drops all children of all foreign got by filter
        :param filter: Filter to apply to all
        :param kwargs: Doesn't apply
        :return: foreign data with all children asociated
        """
        filter = self._filter(filter)
        foreign_data = self.foreign.direct_fetch(filter["foreign"])
        child_filter = filter["child"]
        # NOTE(review): direct_fetch returns a dict, so this iterates its
        # keys (strings) and '"_id" in item' is a substring test — confirm
        # whether iterating foreign_data["data"] was intended.
        for item in foreign_data:
            if "_id" in item:
                child_filter.update({self._child_field: item["_id"]})
                item[self.child.name] = self.child.drop(child_filter)
        return self.fetch(filter)
    def replace(self, filter, data, **kwargs):
        """
        Replaces all children of all foreign got by filter with given data
        :param filter: Filter to apply to all
        :param data: New data to apply to all children
        :param kwargs: Doesn't apply
        :return: All foreigns with all children
        """
        old_data = self.fetch(filter) #TODO HATEOAS
        # NOTE(review): fetch returns a dict; this loops over its keys, so
        # the child lookup below only fires if a key equals the child name —
        # verify against callers.
        for foreign in old_data:
            if self.child.name in foreign:
                for children in foreign[self.child.name]:
                    children.update(data)
                    self.child.replace({"_id": children["_id"]}, children)
        return self.fetch(filter)
    def edit(self, filter, data, **kwargs):
        """
        Alias of replace
        """
        return self.replace(filter, data, **kwargs)
    def close(self):
        # Nothing to release: the underlying models are closed by their owners.
        pass
class ShelveRelational(ShelveModel):
    """ShelveModel whose fetched items embed data from related models."""
    def __init__(self, *args, relations, **kwargs):
        """
        Creates a relational shelve model related with models which names
        are given by relations
        :param args: args for ShelveModel
        :param relations: list with related models (each exposes .name)
        :param kwargs: kwargs for ShelveModel
        """
        super().__init__(*args, **kwargs)
        self._relations = {rel.name: rel for rel in relations}
    def fetch(self, filter, **kwargs):
        """Fetch matching registries, expanding relation-prefixed fields.

        A field named "<relation>_<field>" triggers a sub-fetch on that
        relation filtered by {<field>: value}; the result is stored under
        item[<relation>].

        :param filter: filter forwarded to ShelveModel.direct_fetch
        :return: the fetched page with related data embedded in each item
        """
        data = super().direct_fetch(filter, **kwargs)
        for item in data["data"]:
            # Bug fix: iterate over a snapshot of the keys — we add keys to
            # *item* while scanning it, which raises RuntimeError
            # ("dictionary changed size during iteration") on a live view.
            for field in list(item):
                for name in self._relations:
                    if field.startswith(name + "_"):
                        item[name] = self._relations[name].fetch(
                            filter={field[len(name + "_"):]: item[field]})
        return data
class ShelveBlocking(ShelveModel):
    """
    ShelveModel with a double interface:
    An inner interface with new, edit, replace, drop and fetch whose take dictionaries.
    A Restful interface with post, patch, put, delete and get whose take json data.
    It blocks each registry on each get.
    It implements a "next" verb which get a registry by filter and _id (next in list)
    To use with zrest.

    Locks live in a companion ShelveModel at "<filepath>-blocking"; each lock
    row records who holds it ("blocker"), which registry ("master_id") and
    when it expires ("timeout").
    """
    def __init__(self, filepath, blocker=None, groups=10, *, index_fields=None,
                 headers=None,
                 name=None,
                 items_per_page=50,
                 unique=None,
                 unique_is_id=False,
                 split_unique=0,
                 to_block=True):
        """Create the blocking model and its companion lock store.

        :param filepath: base path for the shelve files
        :param blocker: default identity owning locks taken by this instance
        :param groups: number of data files the registries are split into
        (remaining parameters are forwarded untouched to ShelveModel)
        """
        # Bug fix: groups used to be hard-coded to 10 in this call, silently
        # ignoring the caller's *groups* argument.
        ShelveModel.__init__(self, filepath, groups=groups, index_fields=index_fields,
                             headers=headers,
                             name=name,
                             items_per_page=items_per_page,
                             unique=unique,
                             unique_is_id=unique_is_id,
                             split_unique=split_unique,
                             to_block=to_block)
        self._blocked_registry = {"blocker": None,
                                  "master_id": None,
                                  "timeout": datetime.datetime.now()}
        # One lock row per blocked registry, keyed by master_id.
        self._blocking_model = ShelveModel(filepath+"-blocking", 1, index_fields=["blocker",
                                                                                  "master_id"],
                                           headers=["blocker",
                                                    "master_id",
                                                    "timeout"],
                                           unique="master_id")
        self.blocker = blocker
    @property
    def blocked_registry(self):
        # Last-known lock snapshot (not updated by fetch/replace).
        return self._blocked_registry
    @property
    def blocking_model(self):
        # Companion ShelveModel persisting the locks.
        return self._blocking_model
    def timeout(self):
        """Expiry timestamp for a lock taken right now (25 minutes ahead)."""
        return datetime.datetime.now() + datetime.timedelta(minutes=25)
    def is_blocked(self, filter, blocker, **kwargs):
        """Return True when the single registry matched by *filter* holds a
        live lock owned by somebody other than *blocker*.

        Filters matching zero or several registries are never "blocked".
        """
        filtered = self._filter(filter)
        s_filter = filtered["filter"]
        current_owner = None
        if len(s_filter) == 1:
            blocked = self._blocking_model.direct_fetch({"master_id": s_filter[0]})
            # The lock only counts while its timeout is in the future.
            if ("data" in blocked and len(blocked["data"]) > 0 and
                    "master_id" in blocked["data"][0] and
                    blocked["data"][0]["timeout"] >= datetime.datetime.now()):
                current_owner = blocked["data"][0]["blocker"]
        if current_owner is None:
            return False
        return blocker != current_owner
    def fetch(self, filter, **kwargs):
        """Fetch a registry, taking its lock first.

        :return: {"Error": 401} when the registry is locked by another
            blocker; {"Error": 201} when *filter* asks to "unblock";
            otherwise the ShelveModel.direct_fetch result.
        """
        if filter is not None and "unblock" in filter:
            self.unblock_registry(filter)
            return {"Error": 201}
        else:
            if "_blocker" in filter:
                blocker = filter["_blocker"]
                del(filter["_blocker"])
            else:
                blocker = self.blocker
            filtered = self._filter(filter)
            s_filter = filtered["filter"]
            if len(s_filter) == 1:
                if self.is_blocked(filter, blocker) is True:
                    return {"Error": 401}
                else:
                    # Take (or refresh) the lock for this blocker.
                    self._blocking_model.new({"blocker": blocker,
                                              "master_id": s_filter[0],
                                              "timeout": self.timeout()})
            if "master_id" in filter:
                filter["_id"] = filter["master_id"]
                del(filter["master_id"])
            return ShelveModel.direct_fetch(self, filter, **kwargs)
    def edit(self, *args, **kwargs):
        """Alias of replace."""
        return self.replace(*args, **kwargs)
    def replace(self, filter, data, **kwargs):
        """Replace the first non-blocked registry matched by *filter*.

        Registries locked by another blocker are skipped; the first one this
        blocker may touch is locked, replaced and its result returned
        (None when every match is blocked).
        """
        if filter is not None and "_blocker" in filter:
            blocker = filter["_blocker"]
            del(filter["_blocker"])
        else:
            blocker = self.blocker
        filtered = self._filter(filter)
        s_filter = filtered["filter"]
        for item in s_filter:
            if self.is_blocked({"master_id": item}, blocker):
                continue
            self._blocking_model.new({"blocker": blocker,
                                      "master_id": item,
                                      "timeout": self.timeout()})
            return ShelveModel.replace(self, {"_id": item}, data)
    def unblock_registry(self, filter=None):
        """Release locks owned by the calling blocker.

        With an "_id" in *filter* only that registry's lock is dropped (and
        only when this blocker owns it); otherwise every lock of the blocker
        goes away.

        :return: {"Error": 204} always
        """
        if filter is not None and "_blocker" in filter:
            blocker = filter["_blocker"]
            del(filter["_blocker"])
        else:
            blocker = self.blocker
        if filter is None:
            filter = dict()
        if "_id" in filter:
            if self.is_blocked(filter, blocker) is False:
                master_id = filter["_id"]
                self._blocking_model.drop({"blocker": blocker,
                                           "master_id": master_id})
        else:
            self._blocking_model.drop({"blocker": blocker})
        return {"Error": 204}
    def clean_timeouts(self, page=1):
        """Drop every lock row, walking the lock store page by page."""
        # Renamed from "all", which shadowed the builtin.
        page_data = self._blocking_model.fetch({"page": page})
        for item in page_data["data"]:
            self._blocking_model.drop(item)
        if "total" in page_data and page_data["total"] > page_data["items_per_page"]*page_data["page"]:
            self.clean_timeouts(page_data["page"] + 1)
    def get_next(self, filter, **kwargs): #This is a shit!
        """Fetch the registry after "_item" in the filtered id list.

        Skips registries blocked by others (401) by recursing to the next id;
        releases this blocker's locks and returns {"Error": 404} when the
        list is exhausted.
        """
        if "_item" in filter:
            item = filter["_item"]
            del(filter["_item"])
        else:
            item = None
        blocker = None
        if "_blocker" in filter:
            blocker = filter["_blocker"]
            del(filter["_blocker"])
        filtered = self._filter(filter)["filter"]
        if item is None or int(item) not in filtered:
            # Start from the beginning: index+1 below yields element 0.
            index = -1
        else:
            item = int(item)
            index = filtered.index(item)
        try:
            index = filtered[index + 1]
            fil = {"_id": index}
            if blocker:
                fil["_blocker"] = blocker
            data = self.fetch(fil)
            if "Error" in data and data["Error"] == 401:
                # Blocked by someone else: try the next id in the list.
                filter["_item"] = index
                filter["_blocker"] = blocker
                return self.get_next(filter)
            else:
                return data
        except IndexError:
            self.unblock_registry()
            return {"Error": 404}
    def close(self):
        """Close the lock store before the data store itself."""
        self._blocking_model.close()
        ShelveModel.close(self)
| |
from cStringIO import StringIO
import urllib2
import urlparse
import lxml
import pytest
import gaereader
from google.appengine.ext import ndb, testbed
# Stand up an App Engine testbed (note: this rebinds the imported module
# name) and enable the urlfetch stub so ndb's async fetches run outside GAE.
testbed = testbed.Testbed()
testbed.activate()
testbed.init_urlfetch_stub()
class Result(str):
    """Minimal stand-in for a urlfetch result: the body string itself, also
    exposed through a ``content`` attribute (status_code/url are attached by
    the caller)."""
    def __init__(self, value):
        # NOTE(review): str is immutable, so this super().__init__(value) is
        # a Python-2-only no-op (the value is consumed by str.__new__).
        super(Result, self).__init__(value)
        self.content = value
@ndb.tasklet
def mock_urlfetch(self, url, **_kwargv):
    """Monkeypatch replacement for ndb.Context.urlfetch.

    Maps each URL the Google Reader client may request to a canned
    Auth/JSON/Atom payload; unknown URLs raise ValueError so the test fails
    loudly. Resolves with a Result whose status_code is always 200.
    """
    if url == "https://www.google.com/accounts/ClientLogin":
        result = "Auth=DUMMY"
    elif url == "http://www.google.com/reader/api/0/tag/list?output=json":
        result = '{"tags": [{"id":"user/0/"}]}'
    elif url in [
        "http://www.google.com/reader/atom/feed/feed?n=2",
        "http://www.google.com/reader/atom/feed/url",
        "http://www.google.com/reader/atom/user/0/label/tag",
        "http://www.google.com/reader/atom/user/-/state/com.google/reading-list",
        "http://www.google.com/reader/atom/user/-/state/com.google/read",
        "http://www.google.com/reader/atom/user/-/state/com.google/starred",
        "http://www.google.com/reader/atom/user/-/state/com.google/fresh",
        "http://www.google.com/reader/atom/user/-/state/com.google/broadcast",
        "http://www.google.com/reader/atom/user/-/state/com.google/state",
        "url",
        "url?c=c&r=o",
        "url?output=format",
    ]:
        # Every atom endpoint returns the same one-entry feed.
        result = '<?xml version="1.0"?><feed><entry><id>id</id></entry></feed>'
    elif url.startswith("http://www.google.com/reader/api/0/search/items/ids?"):
        # Search: optionally check the stream parameter, return no hits.
        query = dict(urlparse.parse_qsl(urlparse.urlparse(url).query))
        if "s" in query:
            assert query["s"] == "user/0/label/tag"
        result = '{"results":[]}'
    elif (url in [
        "http://www.google.com/reader/api/0/token",
        "http://www.google.com/reader/api/0/subscription/list?output=json",
        "http://www.google.com/reader/api/0/preference/list?output=json",
        "http://www.google.com/reader/api/0/unread-count?output=json",
    ]
          or url.startswith("http://www.google.com/reader/api/0/stream/items/contents?ck=")
          or url.startswith("http://www.google.com/reader/api/0/stream/contents/user%2F0%2Flabel%2Ftag?ck=")
          or url.startswith("http://www.google.com/reader/api/0/stream/contents/feed/feed_url?ck=")
          or url.startswith("http://www.google.com/reader/api/0/subscription/quickadd?ck=")):
        result = '{}'
    elif url in [
        "http://www.google.com/reader/api/0/subscription/edit?client=mekk.reader_client",
        "http://www.google.com/reader/api/0/disable-tag?client=mekk.reader_client",
    ]:
        result = 'OK'
    else:
        raise ValueError(url)
    result = Result(result)
    result.status_code = 200
    result.url = url
    raise ndb.Return(result)
def pytest_funcarg__mock(request):
    """Old-style pytest funcarg: patch ndb.Context.urlfetch with
    mock_urlfetch for the duration of one test, undoing it afterwards."""
    def setup():
        mock = request.getfuncargvalue("monkeypatch")
        mock.setattr(ndb.Context, "urlfetch", mock_urlfetch)
        return mock
    def teardown(mock):
        mock.undo()
    return request.cached_setup(setup=setup, teardown=teardown, scope="function")
def test_GoogleReaderClient(mock):
    """Smoke-test every public GoogleReaderClient coroutine against the
    mocked urlfetch: each call must resolve without an exception and return
    the value implied by the canned payloads in mock_urlfetch."""
    c = gaereader.GoogleReaderClient("login", "password")
    future = c.tag_id("tag")
    assert future.get_exception() is None
    assert future.get_result() == "user/0/label/tag"
    future = c.get_my_id()
    assert future.get_exception() is None
    assert future.get_result() == "0"
    future = c.feed_item_id("feed")
    assert future.get_exception() is None
    assert future.get_result() == "id"
    future = c.get_feed_atom("url")
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.get_reading_list_atom()
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.get_read_atom()
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.get_tagged_atom("tag")
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.get_starred_atom()
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.get_fresh_atom()
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.get_broadcast_atom()
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.get_instate_atom("state")
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c.search_for_articles("query")
    assert future.get_exception() is None
    assert future.get_result() == []
    future = c.search_for_articles("query", tag="tag")
    assert future.get_exception() is None
    assert future.get_result() == []
    future = c.article_contents("ids")
    assert future.get_exception() is None
    assert future.get_result() == {}
    future = c.contents("tag")
    assert future.get_exception() is None
    assert future.get_result() == {}
    future = c.feed_contents("feed_url")
    assert future.get_exception() is None
    assert future.get_result() == {}
    future = c.get_subscription_list()
    assert future.get_exception() is None
    assert future.get_result() == {}
    future = c.get_tag_list()
    assert future.get_exception() is None
    assert future.get_result() == {u'tags': [{u'id': u'user/0/'}]}
    future = c.get_preference_list()
    assert future.get_exception() is None
    assert future.get_result() == {}
    future = c.get_unread_count()
    assert future.get_exception() is None
    assert future.get_result() == {}
    future = c.subscribe_quickadd("site_url")
    assert future.get_exception() is None
    assert future.get_result() == {}
    future = c.subscribe_feed("feed_url")
    assert future.get_exception() is None
    assert future.get_result() is None
    future = c.unsubscribe_feed("feed_url")
    assert future.get_exception() is None
    assert future.get_result() is None
    future = c.change_feed_title("feed_url", "title")
    assert future.get_exception() is None
    assert future.get_result() is None
    future = c.add_feed_tag("feed_url", "title", "tag")
    assert future.get_exception() is None
    assert future.get_result() is None
    future = c.remove_feed_tag("feed_url", "title", "tag")
    assert future.get_exception() is None
    assert future.get_result() is None
    future = c.disable_tag("tag")
    assert future.get_exception() is None
    assert future.get_result() is None
    # Internal helpers, exercised directly for coverage of the option paths.
    future = c._get_atom("url", older_first=True, continue_from="c", format="etree")
    assert future.get_exception() is None
    assert isinstance(future.get_result(), lxml.etree._Element)
    future = c._get_atom("url", format="unkown")
    assert future.get_exception() is None
    assert future.get_result() == '<?xml version="1.0"?><feed><entry><id>id</id></entry></feed>'
    future = c._change_feed("feed_url", "operation", add_tag="tag", remove_tag="tag")
    assert future.get_exception() is None
    assert future.get_result() is None
    future = c._get_list("url", "format")
    assert future.get_exception() is None
    assert future.get_result() == '<?xml version="1.0"?><feed><entry><id>id</id></entry></feed>'
| |
from sympy import (
symbols, expand, expand_func, nan, oo, Float, conjugate, diff,
re, im, Abs, O, factorial, exp_polar, polar_lift, gruntz, limit,
Symbol, I, integrate, S,
sqrt, sin, cos, sinh, cosh, exp, log, pi, EulerGamma,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv,
gamma, uppergamma, loggamma,
Ei, expint, E1, li, Li, Si, Ci, Shi, Chi,
fresnels, fresnelc,
hyper, meijerg)
from sympy.functions.special.error_functions import _erfs, _eis
from sympy.core.function import ArgumentIndexError
from sympy.utilities.pytest import raises
# Shared test symbols: w is declared real and n integer so assumption-driven
# simplifications can fire in the tests below.
x, y, z = symbols('x,y,z')
w = Symbol("w", real=True)
n = Symbol("n", integer=True)
def test_erf():
    """Special values, symmetries, rewrites, limits and as_real_imag of erf."""
    assert erf(nan) == nan
    assert erf(oo) == 1
    assert erf(-oo) == -1
    assert erf(0) == 0
    assert erf(I*oo) == oo*I
    assert erf(-I*oo) == -oo*I
    # erf is odd.
    assert erf(-2) == -erf(2)
    assert erf(-x*y) == -erf(x*y)
    assert erf(-x - y) == -erf(x + y)
    # Inverse-function round trips.
    assert erf(erfinv(x)) == x
    assert erf(erfcinv(x)) == 1 - x
    assert erf(erf2inv(0, x)) == x
    assert erf(erf2inv(0, erf(erfcinv(1 - erf(erfinv(x)))))) == x
    assert erf(I).is_real is False
    assert erf(0).is_real is True
    assert conjugate(erf(z)) == erf(conjugate(z))
    assert erf(x).as_leading_term(x) == 2*x/sqrt(pi)
    assert erf(1/x).as_leading_term(x) == erf(1/x)
    # Rewrites in terms of the other special functions.
    assert erf(z).rewrite('uppergamma') == sqrt(z**2)*erf(sqrt(z**2))/z
    assert erf(z).rewrite('erfc') == S.One - erfc(z)
    assert erf(z).rewrite('erfi') == -I*erfi(I*z)
    assert erf(z).rewrite('fresnels') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erf(z).rewrite('fresnelc') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erf(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
    assert erf(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
    assert erf(z).rewrite('expint') == sqrt(z**2)/z - z*expint(S.Half, z**2)/sqrt(S.Pi)
    # Asymptotic limits at infinity.
    assert limit(exp(x)*exp(x**2)*(erf(x + 1/exp(x)) - erf(x)), x, oo) == \
        2/sqrt(pi)
    assert limit((1 - erf(z))*exp(z**2)*z, z, oo) == 1/sqrt(pi)
    assert limit((1 - erf(x))*exp(x**2)*sqrt(pi)*x, x, oo) == 1
    assert limit(((1 - erf(x))*exp(x**2)*sqrt(pi)*x - 1)*2*x**2, x, oo) == -1
    assert erf(x).as_real_imag() == \
        ((erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
          erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
          I*(erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
          erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
          re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
    # erf is unary: asking for the 2nd derivative slot must fail.
    raises(ArgumentIndexError, lambda: erf(x).fdiff(2))
def test_erf_series():
    """Maclaurin series of erf up to O(x**7)."""
    assert erf(x).series(x, 0, 7) == 2*x/sqrt(pi) - \
        2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erf_evalf():
    """Numeric evaluation of erf(2) against the reference value."""
    assert abs( erf(Float(2.0)) - 0.995322265 ) < 1E-8  # XXX
def test__erfs():
    """Helper function _erfs: derivative, asymptotic series and rewrites."""
    assert _erfs(z).diff(z) == -2/sqrt(S.Pi) + 2*z*_erfs(z)
    assert _erfs(1/z).series(z) == \
        z/sqrt(pi) - z**3/(2*sqrt(pi)) + 3*z**5/(4*sqrt(pi)) + O(z**6)
    assert expand(erf(z).rewrite('tractable').diff(z).rewrite('intractable')) \
        == erf(z).diff(z)
    assert _erfs(z).rewrite("intractable") == (-erf(z) + 1)*exp(z**2)
def test_erfc():
    """Special values, rewrites and as_real_imag of the complementary erf."""
    assert erfc(nan) == nan
    assert erfc(oo) == 0
    assert erfc(-oo) == 2
    assert erfc(0) == 1
    assert erfc(I*oo) == -oo*I
    assert erfc(-I*oo) == oo*I
    # Reflection formula erfc(-x) = 2 - erfc(x).
    assert erfc(-x) == S(2) - erfc(x)
    assert erfc(erfcinv(x)) == x
    assert erfc(I).is_real is False
    assert erfc(0).is_real is True
    assert conjugate(erfc(z)) == erfc(conjugate(z))
    assert erfc(x).as_leading_term(x) == S.One
    assert erfc(1/x).as_leading_term(x) == erfc(1/x)
    # Rewrites in terms of the other special functions.
    assert erfc(z).rewrite('erf') == 1 - erf(z)
    assert erfc(z).rewrite('erfi') == 1 + I*erfi(I*z)
    assert erfc(z).rewrite('fresnels') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erfc(z).rewrite('fresnelc') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
        I*fresnels(z*(1 - I)/sqrt(pi)))
    assert erfc(z).rewrite('hyper') == 1 - 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
    assert erfc(z).rewrite('meijerg') == 1 - z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
    assert erfc(z).rewrite('uppergamma') == 1 - sqrt(z**2)*erf(sqrt(z**2))/z
    assert erfc(z).rewrite('expint') == S.One - sqrt(z**2)/z + z*expint(S.Half, z**2)/sqrt(S.Pi)
    assert erfc(x).as_real_imag() == \
        ((erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
          erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
          I*(erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
          erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
          re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
    # erfc is unary: asking for the 2nd derivative slot must fail.
    raises(ArgumentIndexError, lambda: erfc(x).fdiff(2))
def test_erfc_series():
    """Maclaurin series of erfc up to O(x**7)."""
    assert erfc(x).series(x, 0, 7) == 1 - 2*x/sqrt(pi) + \
        2*x**3/3/sqrt(pi) - x**5/5/sqrt(pi) + O(x**7)
def test_erfc_evalf():
    """Numeric evaluation of erfc(2) against the reference value."""
    assert abs( erfc(Float(2.0)) - 0.00467773 ) < 1E-8  # XXX
def test_erfi():
    """Special values, rewrites and as_real_imag of the imaginary erf."""
    assert erfi(nan) == nan
    assert erfi(oo) == S.Infinity
    assert erfi(-oo) == S.NegativeInfinity
    assert erfi(0) == S.Zero
    assert erfi(I*oo) == I
    assert erfi(-I*oo) == -I
    # erfi is odd.
    assert erfi(-x) == -erfi(x)
    assert erfi(I*erfinv(x)) == I*x
    assert erfi(I*erfcinv(x)) == I*(1 - x)
    assert erfi(I*erf2inv(0, x)) == I*x
    assert erfi(I).is_real is False
    assert erfi(0).is_real is True
    assert conjugate(erfi(z)) == erfi(conjugate(z))
    # Rewrites in terms of the other special functions.
    assert erfi(z).rewrite('erf') == -I*erf(I*z)
    assert erfi(z).rewrite('erfc') == I*erfc(I*z) - I
    assert erfi(z).rewrite('fresnels') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
        I*fresnels(z*(1 + I)/sqrt(pi)))
    assert erfi(z).rewrite('fresnelc') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
        I*fresnels(z*(1 + I)/sqrt(pi)))
    assert erfi(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], z**2)/sqrt(pi)
    assert erfi(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], -z**2)/sqrt(pi)
    assert erfi(z).rewrite('uppergamma') == (sqrt(-z**2)/z*(uppergamma(S.Half,
        -z**2)/sqrt(S.Pi) - S.One))
    assert erfi(z).rewrite('expint') == sqrt(-z**2)/z - z*expint(S.Half, -z**2)/sqrt(S.Pi)
    assert erfi(x).as_real_imag() == \
        ((erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
          erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
          I*(erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
          erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
          re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
    # erfi is unary: asking for the 2nd derivative slot must fail.
    raises(ArgumentIndexError, lambda: erfi(x).fdiff(2))
def test_erfi_series():
    """Maclaurin series of erfi up to O(x**7)."""
    assert erfi(x).series(x, 0, 7) == 2*x/sqrt(pi) + \
        2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erfi_evalf():
    """Numeric evaluation of erfi(2) against the reference value."""
    assert abs( erfi(Float(2.0)) - 18.5648024145756 ) < 1E-13  # XXX
def test_erf2():
    """Special values, symmetries and rewrites of the two-argument erf2."""
    assert erf2(0, 0) == S.Zero
    assert erf2(x, x) == S.Zero
    assert erf2(nan, 0) == nan
    assert erf2(-oo, y) == erf(y) + 1
    assert erf2( oo, y) == erf(y) - 1
    assert erf2( x, oo) == 1 - erf(x)
    assert erf2( x,-oo) == -1 - erf(x)
    assert erf2(x, erf2inv(x, y)) == y
    assert erf2(-x, -y) == -erf2(x,y)
    assert erf2(-x, y) == erf(y) + erf(x)
    assert erf2( x, -y) == -erf(y) - erf(x)
    assert erf2(x, y).rewrite('fresnels') == erf(y).rewrite(fresnels)-erf(x).rewrite(fresnels)
    assert erf2(x, y).rewrite('fresnelc') == erf(y).rewrite(fresnelc)-erf(x).rewrite(fresnelc)
    assert erf2(x, y).rewrite('hyper') == erf(y).rewrite(hyper)-erf(x).rewrite(hyper)
    assert erf2(x, y).rewrite('meijerg') == erf(y).rewrite(meijerg)-erf(x).rewrite(meijerg)
    assert erf2(x, y).rewrite('uppergamma') == erf(y).rewrite(uppergamma) - erf(x).rewrite(uppergamma)
    assert erf2(x, y).rewrite('expint') == erf(y).rewrite(expint)-erf(x).rewrite(expint)
    assert erf2(I, 0).is_real is False
    assert erf2(0, 0).is_real is True
    #assert conjugate(erf2(x, y)) == erf2(conjugate(x), conjugate(y))
    assert erf2(x, y).rewrite('erf') == erf(y) - erf(x)
    assert erf2(x, y).rewrite('erfc') == erfc(x) - erfc(y)
    assert erf2(x, y).rewrite('erfi') == I*(erfi(I*x) - erfi(I*y))
    # Bug fix: this previously probed erfi(x).fdiff(3) (copy-paste from
    # test_erfi), so erf2's own derivative bounds were never exercised.
    raises(ArgumentIndexError, lambda: erf2(x, y).fdiff(3))
def test_erfinv():
    """Special values, round trips, derivative and rewrite of erfinv."""
    assert erfinv(0) == 0
    assert erfinv(1) == S.Infinity
    assert erfinv(nan) == S.NaN
    assert erfinv(erf(w)) == w
    assert erfinv(erf(-w)) == -w
    assert erfinv(x).diff() == sqrt(pi)*exp(erfinv(x)**2)/2
    assert erfinv(z).rewrite('erfcinv') == erfcinv(1-z)
def test_erfinv_evalf():
    """Numeric evaluation of erfinv(0.2) against the reference value."""
    assert abs( erfinv(Float(0.2)) - 0.179143454621292 ) < 1E-13
def test_erfcinv():
    """Special values, derivative and rewrite of erfcinv."""
    assert erfcinv(1) == 0
    assert erfcinv(0) == S.Infinity
    assert erfcinv(nan) == S.NaN
    assert erfcinv(x).diff() == -sqrt(pi)*exp(erfcinv(x)**2)/2
    assert erfcinv(z).rewrite('erfinv') == erfinv(1-z)
def test_erf2inv():
    """Special values and partial derivatives of the two-argument inverse."""
    assert erf2inv(0, 0) == S.Zero
    assert erf2inv(0, 1) == S.Infinity
    assert erf2inv(1, 0) == S.One
    assert erf2inv(0, y) == erfinv(y)
    assert erf2inv(oo,y) == erfcinv(-y)
    assert erf2inv(x, y).diff(x) == exp(-x**2 + erf2inv(x, y)**2)
    assert erf2inv(x, y).diff(y) == sqrt(pi)*exp(erf2inv(x, y)**2)/2
# NOTE we multiply by exp_polar(I*pi) and need this to be on the principal
# branch, hence take x in the lower half plane (d=0).
def mytn(expr1, expr2, expr3, x, d=0):
    """Check expr2 == expr3 symbolically AND expr1 == expr2 numerically in x,
    with every other free symbol substituted by a random complex number."""
    from sympy.utilities.randtest import test_numerically, random_complex_number
    subs = {}
    for a in expr1.free_symbols:
        if a != x:
            subs[a] = random_complex_number()
    return expr2 == expr3 and test_numerically(expr1.subs(subs),
        expr2.subs(subs), x, d=d)
def mytd(expr1, expr2, x):
    """Check diff(expr1, x) == expr2 symbolically and verify the derivative
    numerically, other free symbols substituted by random complex numbers."""
    from sympy.utilities.randtest import test_derivative_numerically, \
        random_complex_number
    subs = {}
    for a in expr1.free_symbols:
        if a != x:
            subs[a] = random_complex_number()
    return expr1.diff(x) == expr2 and test_derivative_numerically(expr1.subs(subs), x)
def tn_branch(func, s=None):
    """Numerically verify func's branch cut: the jump across the negative
    real axis via exp_polar(+-I*pi) must match the limit from above/below.

    :param s: optional first argument when *func* is binary (e.g. expint)
    """
    from sympy import I, pi, exp_polar
    from random import uniform
    def fn(x):
        if s is None:
            return func(x)
        return func(s, x)
    c = uniform(1, 5)
    expr = fn(c*exp_polar(I*pi)) - fn(c*exp_polar(-I*pi))
    eps = 1e-15
    expr2 = fn(-c + eps*I) - fn(-c - eps*I)
    return abs(expr.n() - expr2.n()).n() < 1e-10
def test_ei():
    """Exponential integral Ei: polar values, branch, rewrites and series."""
    pos = Symbol('p', positive=True)
    neg = Symbol('n', negative=True)
    assert Ei(-pos) == Ei(polar_lift(-1)*pos) - I*pi
    assert Ei(neg) == Ei(polar_lift(neg)) - I*pi
    assert tn_branch(Ei)
    assert mytd(Ei(x), exp(x)/x, x)
    assert mytn(Ei(x), Ei(x).rewrite(uppergamma),
        -uppergamma(0, x*polar_lift(-1)) - I*pi, x)
    assert mytn(Ei(x), Ei(x).rewrite(expint),
        -expint(1, x*polar_lift(-1)) - I*pi, x)
    assert Ei(x).rewrite(expint).rewrite(Ei) == Ei(x)
    # Monodromy: going around the branch point shifts by 2*pi*I.
    assert Ei(x*exp_polar(2*I*pi)) == Ei(x) + 2*I*pi
    assert Ei(x*exp_polar(-2*I*pi)) == Ei(x) - 2*I*pi
    assert mytn(Ei(x), Ei(x).rewrite(Shi), Chi(x) + Shi(x), x)
    assert mytn(Ei(x*polar_lift(I)), Ei(x*polar_lift(I)).rewrite(Si),
        Ci(x) + I*Si(x) + I*pi/2, x)
    assert Ei(log(x)).rewrite(li) == li(x)
    assert Ei(2*log(x)).rewrite(li) == li(x**2)
    assert gruntz(Ei(x+exp(-x))*exp(-x)*x, x, oo) == 1
    assert Ei(x).series(x) == EulerGamma + log(x) + x + x**2/4 + \
        x**3/18 + x**4/96 + x**5/600 + O(x**6)
def test_expint():
    """Generalized exponential integral: rewrites, derivatives, branch
    behaviour and series expansions (including E1)."""
    assert mytn(expint(x, y), expint(x, y).rewrite(uppergamma),
        y**(x - 1)*uppergamma(1 - x, y), x)
    assert mytd(
        expint(x, y), -y**(x - 1)*meijerg([], [1, 1], [0, 0, 1 - x], [], y), x)
    assert mytd(expint(x, y), -expint(x - 1, y), y)
    assert mytn(expint(1, x), expint(1, x).rewrite(Ei),
        -Ei(x*polar_lift(-1)) + I*pi, x)
    # Negative integer / half-integer orders reduce to elementary terms.
    assert expint(-4, x) == exp(-x)/x + 4*exp(-x)/x**2 + 12*exp(-x)/x**3 \
        + 24*exp(-x)/x**4 + 24*exp(-x)/x**5
    assert expint(-S(3)/2, x) == \
        exp(-x)/x + 3*exp(-x)/(2*x**2) - 3*sqrt(pi)*erf(sqrt(x))/(4*x**S('5/2')) \
        + 3*sqrt(pi)/(4*x**S('5/2'))
    assert tn_branch(expint, 1)
    assert tn_branch(expint, 2)
    assert tn_branch(expint, 3)
    assert tn_branch(expint, 1.7)
    assert tn_branch(expint, pi)
    # Monodromy around the branch point.
    assert expint(y, x*exp_polar(2*I*pi)) == \
        x**(y - 1)*(exp(2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
    assert expint(y, x*exp_polar(-2*I*pi)) == \
        x**(y - 1)*(exp(-2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
    assert expint(2, x*exp_polar(2*I*pi)) == 2*I*pi*x + expint(2, x)
    assert expint(2, x*exp_polar(-2*I*pi)) == -2*I*pi*x + expint(2, x)
    assert expint(1, x).rewrite(Ei).rewrite(expint) == expint(1, x)
    assert mytn(E1(x), E1(x).rewrite(Shi), Shi(x) - Chi(x), x)
    assert mytn(E1(polar_lift(I)*x), E1(polar_lift(I)*x).rewrite(Si),
        -Ci(x) + I*Si(x) - I*pi/2, x)
    assert mytn(expint(2, x), expint(2, x).rewrite(Ei).rewrite(expint),
        -x*E1(x) + exp(-x), x)
    assert mytn(expint(3, x), expint(3, x).rewrite(Ei).rewrite(expint),
        x**2*E1(x)/2 + (1 - x)*exp(-x)/2, x)
    assert expint(S(3)/2, z).nseries(z) == \
        2 + 2*z - z**2/3 + z**3/15 - z**4/84 + z**5/540 - \
        2*sqrt(pi)*sqrt(z) + O(z**6)
    assert E1(z).series(z) == -EulerGamma - log(z) + z - \
        z**2/4 + z**3/18 - z**4/96 + z**5/600 + O(z**6)
    assert expint(4, z).series(z) == S(1)/3 - z/2 + z**2/2 + \
        z**3*(log(z)/6 - S(11)/36 + EulerGamma/6) - z**4/24 + \
        z**5/240 + O(z**6)
def test__eis():
    """Helper function _eis: derivative, asymptotic series and the
    tractable/intractable rewrites for Ei and li."""
    assert _eis(z).diff(z) == -_eis(z) + 1/z
    assert _eis(1/z).series(z) == \
        z + z**2 + 2*z**3 + 6*z**4 + 24*z**5 + O(z**6)
    assert Ei(z).rewrite('tractable') == exp(z)*_eis(z)
    assert li(z).rewrite('tractable') == z*_eis(log(z))
    assert _eis(z).rewrite('intractable') == exp(-z)*Ei(z)
    assert expand(li(z).rewrite('tractable').diff(z).rewrite('intractable')) \
        == li(z).diff(z)
    assert expand(Ei(z).rewrite('tractable').diff(z).rewrite('intractable')) \
        == Ei(z).diff(z)
    assert _eis(z).series(z, n=3) == EulerGamma + log(z) + z*(-log(z) - \
        EulerGamma + 1) + z**2*(log(z)/2 - S(3)/4 + EulerGamma/2) + O(z**3*log(z))
def tn_arg(func):
    """Numerically check that *func* evaluated on the four polar-axis
    directions agrees with evaluation at the matching ordinary complex
    argument (nudged slightly off the axis by ``1e-15``).

    Returns True only if every direction agrees to within 1e-10.
    """
    def agrees(direction, e1, e2):
        from random import uniform
        # pick a random magnitude in (1, 5) and compare the two evaluations
        r = uniform(1, 5)
        on_axis = func(direction*x).subs(x, r).n()
        off_axis = func(e1*r + e2*1e-15).n()
        return abs(on_axis - off_axis).n() < 1e-10
    cases = (
        (exp_polar(I*pi/2), I, 1),
        (exp_polar(-I*pi/2), -I, 1),
        (exp_polar(I*pi), -1, I),
        (exp_polar(-I*pi), -1, -I),
    )
    return all(agrees(d, e1, e2) for d, e1, e2 in cases)
def test_li():
    """Tests for the logarithmic integral ``li``: special values, derivative,
    conjugation, rewrites in terms of related special functions, and its
    growth at infinity."""
    z = Symbol("z")
    zr = Symbol("z", real=True)
    zp = Symbol("z", positive=True)
    zn = Symbol("z", negative=True)
    # special values
    assert li(0) == 0
    assert li(1) == -oo
    assert li(oo) == oo
    # li of a generic symbol stays unevaluated
    assert isinstance(li(z), li)
    # d/dz li(z) == 1/log(z)
    assert diff(li(z), z) == 1/log(z)
    # conjugation commutes with li for generic/real arguments;
    # for arguments off the branch cut only the trivial identity holds
    assert conjugate(li(z)) == li(conjugate(z))
    assert conjugate(li(-zr)) == li(-zr)
    assert conjugate(li(-zp)) == conjugate(li(-zp))
    assert conjugate(li(zn)) == conjugate(li(zn))
    # rewrites through related special functions
    assert li(z).rewrite(Li) == Li(z) + li(2)
    assert li(z).rewrite(Ei) == Ei(log(z))
    assert li(z).rewrite(uppergamma) == (-log(1/log(z))/2 - log(-log(z)) +
                                         log(log(z))/2 - expint(1, -log(z)))
    assert li(z).rewrite(Si) == (-log(I*log(z)) - log(1/log(z))/2 +
                                 log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
    assert li(z).rewrite(Ci) == (-log(I*log(z)) - log(1/log(z))/2 +
                                 log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
    assert li(z).rewrite(Shi) == (-log(1/log(z))/2 + log(log(z))/2 +
                                  Chi(log(z)) - Shi(log(z)))
    assert li(z).rewrite(Chi) == (-log(1/log(z))/2 + log(log(z))/2 +
                                  Chi(log(z)) - Shi(log(z)))
    assert li(z).rewrite(hyper) == (log(z)*hyper((1, 1), (2, 2), log(z)) -
                                    log(1/log(z))/2 + log(log(z))/2 + EulerGamma)
    assert li(z).rewrite(meijerg) == (-log(1/log(z))/2 - log(-log(z)) + log(log(z))/2 -
                                      meijerg(((), (1,)), ((0, 0), ()), -log(z)))
    # li grows without bound, so 1/li -> 0 at infinity
    assert gruntz(1/li(z), z, oo) == 0
def test_Li():
    """Tests for the offset logarithmic integral ``Li(z) = li(z) - li(2)``."""
    # Li(2) == 0 by definition of the offset
    assert Li(2) == 0
    assert Li(oo) == oo
    # Li of a generic symbol stays unevaluated
    assert isinstance(Li(z), Li)
    # same derivative as li
    assert diff(Li(z), z) == 1/log(z)
    assert gruntz(1/Li(z), z, oo) == 0
    # round-trip to li
    assert Li(z).rewrite(li) == li(z) - li(2)
def test_si():
    """Tests for the sine integrals ``Si`` and ``Shi``: imaginary-argument
    interchange, oddness, branch behaviour under ``exp_polar``, limits at
    infinity, derivatives, rewrites, and series expansions."""
    # Si and Shi swap under multiplication by I
    assert Si(I*x) == I*Shi(x)
    assert Shi(I*x) == I*Si(x)
    assert Si(-I*x) == -I*Shi(x)
    assert Shi(-I*x) == -I*Si(x)
    # both functions are odd
    assert Si(-x) == -Si(x)
    assert Shi(-x) == -Shi(x)
    # entire functions: a full polar turn changes nothing
    assert Si(exp_polar(2*pi*I)*x) == Si(x)
    assert Si(exp_polar(-2*pi*I)*x) == Si(x)
    assert Shi(exp_polar(2*pi*I)*x) == Shi(x)
    assert Shi(exp_polar(-2*pi*I)*x) == Shi(x)
    # limits at infinity
    assert Si(oo) == pi/2
    assert Si(-oo) == -pi/2
    assert Shi(oo) == oo
    assert Shi(-oo) == -oo
    # derivatives: sin(x)/x and sinh(x)/x respectively
    assert mytd(Si(x), sin(x)/x, x)
    assert mytd(Shi(x), sinh(x)/x, x)
    # rewrites in terms of Ei and expint (checked numerically by mytn)
    assert mytn(Si(x), Si(x).rewrite(Ei),
                -I*(-Ei(x*exp_polar(-I*pi/2))/2
                    + Ei(x*exp_polar(I*pi/2))/2 - I*pi) + pi/2, x)
    assert mytn(Si(x), Si(x).rewrite(expint),
                -I*(-expint(1, x*exp_polar(-I*pi/2))/2 +
                    expint(1, x*exp_polar(I*pi/2))/2) + pi/2, x)
    assert mytn(Shi(x), Shi(x).rewrite(Ei),
                Ei(x)/2 - Ei(x*exp_polar(I*pi))/2 + I*pi/2, x)
    assert mytn(Shi(x), Shi(x).rewrite(expint),
                expint(1, x)/2 - expint(1, x*exp_polar(I*pi))/2 - I*pi/2, x)
    # numeric branch checks along the polar axes
    assert tn_arg(Si)
    assert tn_arg(Shi)
    # series expansions at 0, at a composed argument, and at x=1
    assert Si(x).nseries(x, n=8) == \
        x - x**3/18 + x**5/600 - x**7/35280 + O(x**9)
    assert Shi(x).nseries(x, n=8) == \
        x + x**3/18 + x**5/600 + x**7/35280 + O(x**9)
    assert Si(sin(x)).nseries(x, n=5) == x - 2*x**3/9 + 17*x**5/450 + O(x**6)
    assert Si(x).nseries(x, 1, n=3) == \
        Si(1) + x*sin(1) + x**2*(-sin(1)/2 + cos(1)/2) + O(x**3)
def test_ci():
    """Tests for the cosine integrals ``Ci`` and ``Chi``: branch behaviour
    under polar rotations, limits at infinity, derivatives, rewrites in
    terms of Ei, and series expansions."""
    # shorthand polar rotation factors
    m1 = exp_polar(I*pi)      # half turn, counter-clockwise
    m1_ = exp_polar(-I*pi)    # half turn, clockwise
    pI = exp_polar(I*pi/2)    # quarter turn, counter-clockwise
    mI = exp_polar(-I*pi/2)   # quarter turn, clockwise
    # half turns shift Ci/Chi by +-I*pi; quarter turns swap Ci <-> Chi
    assert Ci(m1*x) == Ci(x) + I*pi
    assert Ci(m1_*x) == Ci(x) - I*pi
    assert Ci(pI*x) == Chi(x) + I*pi/2
    assert Ci(mI*x) == Chi(x) - I*pi/2
    assert Chi(m1*x) == Chi(x) + I*pi
    assert Chi(m1_*x) == Chi(x) - I*pi
    assert Chi(pI*x) == Ci(x) + I*pi/2
    assert Chi(mI*x) == Ci(x) - I*pi/2
    # a full turn crosses the branch cut: shift by +-2*I*pi
    assert Ci(exp_polar(2*I*pi)*x) == Ci(x) + 2*I*pi
    assert Chi(exp_polar(-2*I*pi)*x) == Chi(x) - 2*I*pi
    assert Chi(exp_polar(2*I*pi)*x) == Chi(x) + 2*I*pi
    assert Ci(exp_polar(-2*I*pi)*x) == Ci(x) - 2*I*pi
    # limits at infinity
    assert Ci(oo) == 0
    assert Ci(-oo) == I*pi
    assert Chi(oo) == oo
    assert Chi(-oo) == oo
    # derivatives: cos(x)/x and cosh(x)/x respectively
    assert mytd(Ci(x), cos(x)/x, x)
    assert mytd(Chi(x), cosh(x)/x, x)
    # rewrites in terms of Ei (checked numerically by mytn)
    assert mytn(Ci(x), Ci(x).rewrite(Ei),
                Ei(x*exp_polar(-I*pi/2))/2 + Ei(x*exp_polar(I*pi/2))/2, x)
    assert mytn(Chi(x), Chi(x).rewrite(Ei),
                Ei(x)/2 + Ei(x*exp_polar(I*pi))/2 - I*pi/2, x)
    # numeric branch checks along the polar axes
    assert tn_arg(Ci)
    assert tn_arg(Chi)
    from sympy import O, EulerGamma, log, limit
    # series at 0 carry the EulerGamma + log(x) singular part
    assert Ci(x).nseries(x, n=4) == \
        EulerGamma + log(x) - x**2/4 + x**4/96 + O(x**5)
    assert Chi(x).nseries(x, n=4) == \
        EulerGamma + log(x) + x**2/4 + x**4/96 + O(x**5)
    assert limit(log(x) - Ci(2*x), x, 0) == -log(2) - EulerGamma
def test_fresnel():
    """Tests for the Fresnel integrals ``fresnels`` and ``fresnelc``:
    special values, symmetry, derivatives, rewrites (erf/hyper/meijerg),
    real/imaginary decomposition, antiderivative expansion, and series at
    both 0 and infinity, including numeric cross-checks."""
    # --- fresnels ---
    # special values and symmetry
    assert fresnels(0) == 0
    assert fresnels(oo) == S.Half
    assert fresnels(-oo) == -S.Half
    assert fresnels(z) == fresnels(z)
    assert fresnels(-z) == -fresnels(z)          # odd function
    assert fresnels(I*z) == -I*fresnels(z)
    assert fresnels(-I*z) == I*fresnels(z)
    assert conjugate(fresnels(z)) == fresnels(conjugate(z))
    # d/dz fresnels(z) == sin(pi*z**2/2)
    assert fresnels(z).diff(z) == sin(pi*z**2/2)
    # rewrites through erf and hypergeometric functions
    assert fresnels(z).rewrite(erf) == (S.One + I)/4 * (
        erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))
    assert fresnels(z).rewrite(hyper) == \
        pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16)
    assert fresnels(z).series(z, n=15) == \
        pi*z**3/6 - pi**3*z**7/336 + pi**5*z**11/42240 + O(z**15)
    # real on the real axis (w is real — see module-level symbols)
    assert fresnels(w).is_real is True
    # real/imaginary decomposition for a generic complex argument
    assert fresnels(z).as_real_imag() == \
        ((fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
          fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
          I*(fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
          fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
          re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
    assert fresnels(2 + 3*I).as_real_imag() == (
        fresnels(2 + 3*I)/2 + fresnels(2 - 3*I)/2,
        I*(fresnels(2 - 3*I) - fresnels(2 + 3*I))/2
    )
    # antiderivative: integral of S(z) is z*S(z) + cos(pi*z**2/2)/pi
    assert expand_func(integrate(fresnels(z), z)) == \
        z*fresnels(z) + cos(pi*z**2/2)/pi
    assert fresnels(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(9)/4) * \
        meijerg(((), (1,)), ((S(3)/4,),
        (S(1)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(3)/4)*(z**2)**(S(3)/4))
    # --- fresnelc ---
    # special values and symmetry
    assert fresnelc(0) == 0
    assert fresnelc(oo) == S.Half
    assert fresnelc(-oo) == -S.Half
    assert fresnelc(z) == fresnelc(z)
    assert fresnelc(-z) == -fresnelc(z)          # odd function
    assert fresnelc(I*z) == I*fresnelc(z)
    assert fresnelc(-I*z) == -I*fresnelc(z)
    assert conjugate(fresnelc(z)) == fresnelc(conjugate(z))
    # d/dz fresnelc(z) == cos(pi*z**2/2)
    assert fresnelc(z).diff(z) == cos(pi*z**2/2)
    assert fresnelc(z).rewrite(erf) == (S.One - I)/4 * (
        erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z))
    assert fresnelc(z).rewrite(hyper) == \
        z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16)
    assert fresnelc(z).series(z, n=15) == \
        z - pi**2*z**5/40 + pi**4*z**9/3456 - pi**6*z**13/599040 + O(z**15)
    # issue 6510: asymptotic expansions at infinity
    assert fresnels(z).series(z, S.Infinity) == \
        (-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + \
        (3/(pi**3*z**5) - 1/(pi*z) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + S.Half
    assert fresnelc(z).series(z, S.Infinity) == \
        (-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + \
        (-3/(pi**3*z**5) + 1/(pi*z) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + S.Half
    assert fresnels(1/z).series(z) == \
        (-z**3/pi**2 + O(z**6))*sin(pi/(2*z**2)) + (-z/pi + 3*z**5/pi**3 + \
        O(z**6))*cos(pi/(2*z**2)) + S.Half
    assert fresnelc(1/z).series(z) == \
        (-z**3/pi**2 + O(z**6))*cos(pi/(2*z**2)) + (z/pi - 3*z**5/pi**3 + \
        O(z**6))*sin(pi/(2*z**2)) + S.Half
    assert fresnelc(w).is_real is True
    assert fresnelc(z).as_real_imag() == \
        ((fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
          fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
          I*(fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
          fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
          re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
    assert fresnelc(2 + 3*I).as_real_imag() == (
        fresnelc(2 - 3*I)/2 + fresnelc(2 + 3*I)/2,
        I*(fresnelc(2 - 3*I) - fresnelc(2 + 3*I))/2
    )
    # antiderivative: integral of C(z) is z*C(z) - sin(pi*z**2/2)/pi
    assert expand_func(integrate(fresnelc(z), z)) == \
        z*fresnelc(z) - sin(pi*z**2/2)/pi
    assert fresnelc(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(3)/4) * \
        meijerg(((), (1,)), ((S(1)/4,),
        (S(3)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(1)/4)*(z**2)**(S(1)/4))
    # numeric cross-checks of the decompositions and rewrites
    from sympy.utilities.randtest import test_numerically
    test_numerically(re(fresnels(z)), fresnels(z).as_real_imag()[0], z)
    test_numerically(im(fresnels(z)), fresnels(z).as_real_imag()[1], z)
    test_numerically(fresnels(z), fresnels(z).rewrite(hyper), z)
    test_numerically(fresnels(z), fresnels(z).rewrite(meijerg), z)
    test_numerically(re(fresnelc(z)), fresnelc(z).as_real_imag()[0], z)
    test_numerically(im(fresnelc(z)), fresnelc(z).as_real_imag()[1], z)
    test_numerically(fresnelc(z), fresnelc(z).rewrite(hyper), z)
    test_numerically(fresnelc(z), fresnelc(z).rewrite(meijerg), z)
# --------------------------------------------------------------------------
#
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#
import sys
from antlr4.IntervalSet import IntervalSet
from antlr4.Token import Token
from antlr4.atn.ATNState import ATNState
from antlr4.error.Errors import RecognitionException, NoViableAltException, InputMismatchException, \
FailedPredicateException, ParseCancellationException
# need forward declaration
Parser = None
class ErrorStrategy(object):
    """Abstract interface for parser error reporting and recovery.

    Concrete strategies (see ``DefaultErrorStrategy`` below) override these
    hooks; the base class provides no-op defaults.
    """
    def reset(self, recognizer:Parser):
        # Reset any per-parse state held by the strategy.
        pass
    def recoverInline(self, recognizer:Parser):
        # Attempt in-place recovery from a mismatched token during match().
        pass
    def recover(self, recognizer:Parser, e:RecognitionException):
        # Recover from the given recognition exception.
        pass
    def sync(self, recognizer:Parser):
        # Ensure the lookahead is consistent with the current ATN state.
        pass
    def inErrorRecoveryMode(self, recognizer:Parser):
        # Report whether the strategy is currently recovering from an error.
        pass
    def reportError(self, recognizer:Parser, e:RecognitionException):
        # Report the given recognition exception to the error listeners.
        pass
# This is the default implementation of {@link ANTLRErrorStrategy} used for
# error reporting and recovery in ANTLR parsers.
#
class DefaultErrorStrategy(ErrorStrategy):
    """Default error-handling strategy for ANTLR-generated parsers.

    Reports syntax errors through the parser's error listeners and attempts
    recovery via single-token insertion/deletion and follow-set
    resynchronization.
    """
    def __init__(self):
        super().__init__()
        # Indicates whether the error strategy is currently "recovering from an
        # error". This is used to suppress reporting multiple error messages while
        # attempting to recover from a detected syntax error.
        #
        # @see #inErrorRecoveryMode
        #
        self.errorRecoveryMode = False
        # The index into the input stream where the last error occurred.
        # This is used to prevent infinite loops where an error is found
        # but no token is consumed during recovery...another error is found,
        # ad nauseam. This is a failsafe mechanism to guarantee that at least
        # one token/tree node is consumed for two errors.
        #
        self.lastErrorIndex = -1
        self.lastErrorStates = None
    # <p>The default implementation simply calls {@link #endErrorCondition} to
    # ensure that the handler is not in error recovery mode.</p>
    def reset(self, recognizer:Parser):
        self.endErrorCondition(recognizer)
    #
    # This method is called to enter error recovery mode when a recognition
    # exception is reported.
    #
    # @param recognizer the parser instance
    #
    def beginErrorCondition(self, recognizer:Parser):
        self.errorRecoveryMode = True
    def inErrorRecoveryMode(self, recognizer:Parser):
        return self.errorRecoveryMode
    #
    # This method is called to leave error recovery mode after recovering from
    # a recognition exception.
    #
    # @param recognizer
    #
    def endErrorCondition(self, recognizer:Parser):
        self.errorRecoveryMode = False
        self.lastErrorStates = None
        self.lastErrorIndex = -1
    #
    # {@inheritDoc}
    #
    # <p>The default implementation simply calls {@link #endErrorCondition}.</p>
    #
    def reportMatch(self, recognizer:Parser):
        self.endErrorCondition(recognizer)
    #
    # {@inheritDoc}
    #
    # <p>The default implementation returns immediately if the handler is already
    # in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
    # and dispatches the reporting task based on the runtime type of {@code e}
    # according to the following table.</p>
    #
    # <ul>
    # <li>{@link NoViableAltException}: Dispatches the call to
    # {@link #reportNoViableAlternative}</li>
    # <li>{@link InputMismatchException}: Dispatches the call to
    # {@link #reportInputMismatch}</li>
    # <li>{@link FailedPredicateException}: Dispatches the call to
    # {@link #reportFailedPredicate}</li>
    # <li>All other types: calls {@link Parser#notifyErrorListeners} to report
    # the exception</li>
    # </ul>
    #
    def reportError(self, recognizer:Parser, e:RecognitionException):
        # if we've already reported an error and have not matched a token
        # yet successfully, don't report any errors.
        if self.inErrorRecoveryMode(recognizer):
            return # don't report spurious errors
        self.beginErrorCondition(recognizer)
        if isinstance( e, NoViableAltException ):
            self.reportNoViableAlternative(recognizer, e)
        elif isinstance( e, InputMismatchException ):
            self.reportInputMismatch(recognizer, e)
        elif isinstance( e, FailedPredicateException ):
            self.reportFailedPredicate(recognizer, e)
        else:
            print("unknown recognition error type: " + type(e).__name__)
            recognizer.notifyErrorListeners(e.message, e.offendingToken, e)
    #
    # {@inheritDoc}
    #
    # <p>The default implementation resynchronizes the parser by consuming tokens
    # until we find one in the resynchronization set--loosely the set of tokens
    # that can follow the current rule.</p>
    #
    def recover(self, recognizer:Parser, e:RecognitionException):
        if self.lastErrorIndex==recognizer.getInputStream().index \
                and self.lastErrorStates is not None \
                and recognizer.state in self.lastErrorStates:
            # uh oh, another error at same token index and previously-visited
            # state in ATN; must be a case where LT(1) is in the recovery
            # token set so nothing got consumed. Consume a single token
            # at least to prevent an infinite loop; this is a failsafe.
            recognizer.consume()
        self.lastErrorIndex = recognizer._input.index
        if self.lastErrorStates is None:
            self.lastErrorStates = []
        self.lastErrorStates.append(recognizer.state)
        followSet = self.getErrorRecoverySet(recognizer)
        self.consumeUntil(recognizer, followSet)
    # The default implementation of {@link ANTLRErrorStrategy#sync} makes sure
    # that the current lookahead symbol is consistent with what we were expecting
    # at this point in the ATN. You can call this anytime but ANTLR only
    # generates code to check before subrules/loops and each iteration.
    #
    # <p>Implements Jim Idle's magic sync mechanism in closures and optional
    # subrules. E.g.,</p>
    #
    # <pre>
    # a : sync ( stuff sync )* ;
    # sync : {consume to what can follow sync} ;
    # </pre>
    #
    # At the start of a sub rule upon error, {@link #sync} performs single
    # token deletion, if possible. If it can't do that, it bails on the current
    # rule and uses the default error recovery, which consumes until the
    # resynchronization set of the current rule.
    #
    # <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
    # with an empty alternative), then the expected set includes what follows
    # the subrule.</p>
    #
    # <p>During loop iteration, it consumes until it sees a token that can start a
    # sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
    # stay in the loop as long as possible.</p>
    #
    # <p><strong>ORIGINS</strong></p>
    #
    # <p>Previous versions of ANTLR did a poor job of their recovery within loops.
    # A single mismatch token or missing token would force the parser to bail
    # out of the entire rules surrounding the loop. So, for rule</p>
    #
    # <pre>
    # classDef : 'class' ID '{' member* '}'
    # </pre>
    #
    # input with an extra token between members would force the parser to
    # consume until it found the next class definition rather than the next
    # member definition of the current class.
    #
    # <p>This functionality cost a little bit of effort because the parser has to
    # compare token set at the start of the loop and at each iteration. If for
    # some reason speed is suffering for you, you can turn off this
    # functionality by simply overriding this method as a blank { }.</p>
    #
    def sync(self, recognizer:Parser):
        # If already recovering, don't try to sync
        if self.inErrorRecoveryMode(recognizer):
            return
        s = recognizer._interp.atn.states[recognizer.state]
        la = recognizer.getTokenStream().LA(1)
        # try cheaper subset first; might get lucky. seems to shave a wee bit off
        nextTokens = recognizer.atn.nextTokens(s)
        if Token.EPSILON in nextTokens or la in nextTokens:
            return
        if s.stateType in [ATNState.BLOCK_START, ATNState.STAR_BLOCK_START,
                           ATNState.PLUS_BLOCK_START, ATNState.STAR_LOOP_ENTRY]:
            # report error and recover if possible
            if self.singleTokenDeletion(recognizer)is not None:
                return
            else:
                raise InputMismatchException(recognizer)
        elif s.stateType in [ATNState.PLUS_LOOP_BACK, ATNState.STAR_LOOP_BACK]:
            self.reportUnwantedToken(recognizer)
            expecting = recognizer.getExpectedTokens()
            whatFollowsLoopIterationOrRule = expecting.addSet(self.getErrorRecoverySet(recognizer))
            self.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
        else:
            # do nothing if we can't identify the exact kind of ATN state
            pass
    # This is called by {@link #reportError} when the exception is a
    # {@link NoViableAltException}.
    #
    # @see #reportError
    #
    # @param recognizer the parser instance
    # @param e the recognition exception
    #
    def reportNoViableAlternative(self, recognizer:Parser, e:NoViableAltException):
        tokens = recognizer.getTokenStream()
        if tokens is not None:
            if e.startToken.type==Token.EOF:
                input = "<EOF>"
            else:
                input = tokens.getText((e.startToken, e.offendingToken))
        else:
            input = "<unknown input>"
        msg = "no viable alternative at input " + self.escapeWSAndQuote(input)
        recognizer.notifyErrorListeners(msg, e.offendingToken, e)
    #
    # This is called by {@link #reportError} when the exception is an
    # {@link InputMismatchException}.
    #
    # @see #reportError
    #
    # @param recognizer the parser instance
    # @param e the recognition exception
    #
    def reportInputMismatch(self, recognizer:Parser, e:InputMismatchException):
        msg = "mismatched input " + self.getTokenErrorDisplay(e.offendingToken) \
              + " expecting " + e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames)
        recognizer.notifyErrorListeners(msg, e.offendingToken, e)
    #
    # This is called by {@link #reportError} when the exception is a
    # {@link FailedPredicateException}.
    #
    # @see #reportError
    #
    # @param recognizer the parser instance
    # @param e the recognition exception
    #
    def reportFailedPredicate(self, recognizer:Parser, e:FailedPredicateException):
        ruleName = recognizer.ruleNames[recognizer._ctx.getRuleIndex()]
        msg = "rule " + ruleName + " " + e.message
        recognizer.notifyErrorListeners(msg, e.offendingToken, e)
    # This method is called to report a syntax error which requires the removal
    # of a token from the input stream. At the time this method is called, the
    # erroneous symbol is current {@code LT(1)} symbol and has not yet been
    # removed from the input stream. When this method returns,
    # {@code recognizer} is in error recovery mode.
    #
    # <p>This method is called when {@link #singleTokenDeletion} identifies
    # single-token deletion as a viable recovery strategy for a mismatched
    # input error.</p>
    #
    # <p>The default implementation simply returns if the handler is already in
    # error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
    # enter error recovery mode, followed by calling
    # {@link Parser#notifyErrorListeners}.</p>
    #
    # @param recognizer the parser instance
    #
    def reportUnwantedToken(self, recognizer:Parser):
        if self.inErrorRecoveryMode(recognizer):
            return
        self.beginErrorCondition(recognizer)
        t = recognizer.getCurrentToken()
        tokenName = self.getTokenErrorDisplay(t)
        expecting = self.getExpectedTokens(recognizer)
        msg = "extraneous input " + tokenName + " expecting " \
            + expecting.toString(recognizer.literalNames, recognizer.symbolicNames)
        recognizer.notifyErrorListeners(msg, t, None)
    # This method is called to report a syntax error which requires the
    # insertion of a missing token into the input stream. At the time this
    # method is called, the missing token has not yet been inserted. When this
    # method returns, {@code recognizer} is in error recovery mode.
    #
    # <p>This method is called when {@link #singleTokenInsertion} identifies
    # single-token insertion as a viable recovery strategy for a mismatched
    # input error.</p>
    #
    # <p>The default implementation simply returns if the handler is already in
    # error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
    # enter error recovery mode, followed by calling
    # {@link Parser#notifyErrorListeners}.</p>
    #
    # @param recognizer the parser instance
    #
    def reportMissingToken(self, recognizer:Parser):
        if self.inErrorRecoveryMode(recognizer):
            return
        self.beginErrorCondition(recognizer)
        t = recognizer.getCurrentToken()
        expecting = self.getExpectedTokens(recognizer)
        msg = "missing " + expecting.toString(recognizer.literalNames, recognizer.symbolicNames) \
              + " at " + self.getTokenErrorDisplay(t)
        recognizer.notifyErrorListeners(msg, t, None)
    # <p>The default implementation attempts to recover from the mismatched input
    # by using single token insertion and deletion as described below. If the
    # recovery attempt fails, this method throws an
    # {@link InputMismatchException}.</p>
    #
    # <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
    #
    # <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
    # right token, however, then assume {@code LA(1)} is some extra spurious
    # token and delete it. Then consume and return the next token (which was
    # the {@code LA(2)} token) as the successful result of the match operation.</p>
    #
    # <p>This recovery strategy is implemented by {@link #singleTokenDeletion}.</p>
    #
    # <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
    #
    # <p>If current token (at {@code LA(1)}) is consistent with what could come
    # after the expected {@code LA(1)} token, then assume the token is missing
    # and use the parser's {@link TokenFactory} to create it on the fly. The
    # "insertion" is performed by returning the created token as the successful
    # result of the match operation.</p>
    #
    # <p>This recovery strategy is implemented by {@link #singleTokenInsertion}.</p>
    #
    # <p><strong>EXAMPLE</strong></p>
    #
    # <p>For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When
    # the parser returns from the nested call to {@code expr}, it will have
    # call chain:</p>
    #
    # <pre>
    # stat &rarr; expr &rarr; atom
    # </pre>
    #
    # and it will be trying to match the {@code ')'} at this point in the
    # derivation:
    #
    # <pre>
    # =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
    # ^
    # </pre>
    #
    # The attempt to match {@code ')'} will fail when it sees {@code ';'} and
    # call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
    # is in the set of tokens that can follow the {@code ')'} token reference
    # in rule {@code atom}. It can assume that you forgot the {@code ')'}.
    #
    def recoverInline(self, recognizer:Parser):
        # SINGLE TOKEN DELETION
        matchedSymbol = self.singleTokenDeletion(recognizer)
        if matchedSymbol is not None:
            # we have deleted the extra token.
            # now, move past ttype token as if all were ok
            recognizer.consume()
            return matchedSymbol
        # SINGLE TOKEN INSERTION
        if self.singleTokenInsertion(recognizer):
            return self.getMissingSymbol(recognizer)
        # even that didn't work; must throw the exception
        raise InputMismatchException(recognizer)
    #
    # This method implements the single-token insertion inline error recovery
    # strategy. It is called by {@link #recoverInline} if the single-token
    # deletion strategy fails to recover from the mismatched input. If this
    # method returns {@code true}, {@code recognizer} will be in error recovery
    # mode.
    #
    # <p>This method determines whether or not single-token insertion is viable by
    # checking if the {@code LA(1)} input symbol could be successfully matched
    # if it were instead the {@code LA(2)} symbol. If this method returns
    # {@code true}, the caller is responsible for creating and inserting a
    # token with the correct type to produce this behavior.</p>
    #
    # @param recognizer the parser instance
    # @return {@code true} if single-token insertion is a viable recovery
    # strategy for the current mismatched input, otherwise {@code false}
    #
    def singleTokenInsertion(self, recognizer:Parser):
        currentSymbolType = recognizer.getTokenStream().LA(1)
        # if current token is consistent with what could come after current
        # ATN state, then we know we're missing a token; error recovery
        # is free to conjure up and insert the missing token
        atn = recognizer._interp.atn
        currentState = atn.states[recognizer.state]
        next = currentState.transitions[0].target
        expectingAtLL2 = atn.nextTokens(next, recognizer._ctx)
        if currentSymbolType in expectingAtLL2:
            self.reportMissingToken(recognizer)
            return True
        else:
            return False
    # This method implements the single-token deletion inline error recovery
    # strategy. It is called by {@link #recoverInline} to attempt to recover
    # from mismatched input. If this method returns null, the parser and error
    # handler state will not have changed. If this method returns non-null,
    # {@code recognizer} will <em>not</em> be in error recovery mode since the
    # returned token was a successful match.
    #
    # <p>If the single-token deletion is successful, this method calls
    # {@link #reportUnwantedToken} to report the error, followed by
    # {@link Parser#consume} to actually "delete" the extraneous token. Then,
    # before returning {@link #reportMatch} is called to signal a successful
    # match.</p>
    #
    # @param recognizer the parser instance
    # @return the successfully matched {@link Token} instance if single-token
    # deletion successfully recovers from the mismatched input, otherwise
    # {@code null}
    #
    def singleTokenDeletion(self, recognizer:Parser):
        nextTokenType = recognizer.getTokenStream().LA(2)
        expecting = self.getExpectedTokens(recognizer)
        if nextTokenType in expecting:
            self.reportUnwantedToken(recognizer)
            # print("recoverFromMismatchedToken deleting " \
            #     + str(recognizer.getTokenStream().LT(1)) \
            #     + " since " + str(recognizer.getTokenStream().LT(2)) \
            #     + " is what we want", file=sys.stderr)
            recognizer.consume() # simply delete extra token
            # we want to return the token we're actually matching
            matchedSymbol = recognizer.getCurrentToken()
            self.reportMatch(recognizer)  # we know current token is correct
            return matchedSymbol
        else:
            return None
    # Conjure up a missing token during error recovery.
    #
    # The recognizer attempts to recover from single missing
    # symbols. But, actions might refer to that missing symbol.
    # For example, x=ID {f($x);}. The action clearly assumes
    # that there has been an identifier matched previously and that
    # $x points at that token. If that token is missing, but
    # the next token in the stream is what we want we assume that
    # this token is missing and we keep going. Because we
    # have to return some token to replace the missing token,
    # we have to conjure one up. This method gives the user control
    # over the tokens returned for missing tokens. Mostly,
    # you will want to create something special for identifier
    # tokens. For literals such as '{' and ',', the default
    # action in the parser or tree parser works. It simply creates
    # a CommonToken of the appropriate type. The text will be the token.
    # If you change what tokens must be created by the lexer,
    # override this method to create the appropriate tokens.
    #
    def getMissingSymbol(self, recognizer:Parser):
        currentSymbol = recognizer.getCurrentToken()
        expecting = self.getExpectedTokens(recognizer)
        expectedTokenType = expecting[0] # get any element
        if expectedTokenType==Token.EOF:
            tokenText = "<missing EOF>"
        else:
            name = None
            if expectedTokenType < len(recognizer.literalNames):
                name = recognizer.literalNames[expectedTokenType]
            if name is None and expectedTokenType < len(recognizer.symbolicNames):
                name = recognizer.symbolicNames[expectedTokenType]
            tokenText = "<missing " + str(name) + ">"
        current = currentSymbol
        # if the current token is EOF, base the conjured token's position on
        # the previous token instead
        lookback = recognizer.getTokenStream().LT(-1)
        if current.type==Token.EOF and lookback is not None:
            current = lookback
        return recognizer.getTokenFactory().create(current.source,
            expectedTokenType, tokenText, Token.DEFAULT_CHANNEL,
            -1, -1, current.line, current.column)
    def getExpectedTokens(self, recognizer:Parser):
        # Delegate to the parser; kept as a hook for subclasses to override.
        return recognizer.getExpectedTokens()
    # How should a token be displayed in an error message? The default
    # is to display just the text, but during development you might
    # want to have a lot of information spit out. Override in that case
    # to use t.toString() (which, for CommonToken, dumps everything about
    # the token). This is better than forcing you to override a method in
    # your token objects because you don't have to go modify your lexer
    # so that it creates a new Java type.
    #
    def getTokenErrorDisplay(self, t:Token):
        if t is None:
            return "<no token>"
        s = t.text
        if s is None:
            if t.type==Token.EOF:
                s = "<EOF>"
            else:
                s = "<" + str(t.type) + ">"
        return self.escapeWSAndQuote(s)
    def escapeWSAndQuote(self, s:str):
        # Escape whitespace so error messages stay on one line, then quote.
        s = s.replace("\n","\\n")
        s = s.replace("\r","\\r")
        s = s.replace("\t","\\t")
        return "'" + s + "'"
    # Compute the error recovery set for the current rule. During
    # rule invocation, the parser pushes the set of tokens that can
    # follow that rule reference on the stack; this amounts to
    # computing FIRST of what follows the rule reference in the
    # enclosing rule. See LinearApproximator.FIRST().
    # This local follow set only includes tokens
    # from within the rule; i.e., the FIRST computation done by
    # ANTLR stops at the end of a rule.
    #
    # EXAMPLE
    #
    # When you find a "no viable alt exception", the input is not
    # consistent with any of the alternatives for rule r. The best
    # thing to do is to consume tokens until you see something that
    # can legally follow a call to r *or* any rule that called r.
    # You don't want the exact set of viable next tokens because the
    # input might just be missing a token--you might consume the
    # rest of the input looking for one of the missing tokens.
    #
    # Consider grammar:
    #
    # a : '[' b ']'
    # | '(' b ')'
    # ;
    # b : c '^' INT ;
    # c : ID
    # | INT
    # ;
    #
    # At each rule invocation, the set of tokens that could follow
    # that rule is pushed on a stack. Here are the various
    # context-sensitive follow sets:
    #
    # FOLLOW(b1_in_a) = FIRST(']') = ']'
    # FOLLOW(b2_in_a) = FIRST(')') = ')'
    # FOLLOW(c_in_b) = FIRST('^') = '^'
    #
    # Upon erroneous input "[]", the call chain is
    #
    # a -> b -> c
    #
    # and, hence, the follow context stack is:
    #
    # depth follow set start of rule execution
    # 0 <EOF> a (from main())
    # 1 ']' b
    # 2 '^' c
    #
    # Notice that ')' is not included, because b would have to have
    # been called from a different context in rule a for ')' to be
    # included.
    #
    # For error recovery, we cannot consider FOLLOW(c)
    # (context-sensitive or otherwise). We need the combined set of
    # all context-sensitive FOLLOW sets--the set of all tokens that
    # could follow any reference in the call chain. We need to
    # resync to one of those tokens. Note that FOLLOW(c)='^' and if
    # we resync'd to that token, we'd consume until EOF. We need to
    # sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
    # In this case, for input "[]", LA(1) is ']' and in the set, so we would
    # not consume anything. After printing an error, rule c would
    # return normally. Rule b would not find the required '^' though.
    # At this point, it gets a mismatched token error and throws an
    # exception (since LA(1) is not in the viable following token
    # set). The rule exception handler tries to recover, but finds
    # the same recovery set and doesn't consume anything. Rule b
    # exits normally returning to rule a. Now it finds the ']' (and
    # with the successful match exits errorRecovery mode).
    #
    # So, you can see that the parser walks up the call chain looking
    # for the token that was a member of the recovery set.
    #
    # Errors are not generated in errorRecovery mode.
    #
    # ANTLR's error recovery mechanism is based upon original ideas:
    #
    # "Algorithms + Data Structures = Programs" by Niklaus Wirth
    #
    # and
    #
    # "A note on error recovery in recursive descent parsers":
    # http://portal.acm.org/citation.cfm?id=947902.947905
    #
    # Later, Josef Grosch had some good ideas:
    #
    # "Efficient and Comfortable Error Recovery in Recursive Descent
    # Parsers":
    # ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
    #
    # Like Grosch I implement context-sensitive FOLLOW sets that are combined
    # at run-time upon error to avoid overhead during parsing.
    #
    def getErrorRecoverySet(self, recognizer:Parser):
        atn = recognizer._interp.atn
        ctx = recognizer._ctx
        recoverSet = IntervalSet()
        while ctx is not None and ctx.invokingState>=0:
            # compute what follows who invoked us
            invokingState = atn.states[ctx.invokingState]
            rt = invokingState.transitions[0]
            follow = atn.nextTokens(rt.followState)
            recoverSet.addSet(follow)
            ctx = ctx.parentCtx
        recoverSet.removeOne(Token.EPSILON)
        return recoverSet
    # Consume tokens until one matches the given token set.
    def consumeUntil(self, recognizer:Parser, set_:IntervalSet):
        ttype = recognizer.getTokenStream().LA(1)
        while ttype != Token.EOF and not ttype in set_:
            recognizer.consume()
            ttype = recognizer.getTokenStream().LA(1)
#
# This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
# by immediately canceling the parse operation with a
# {@link ParseCancellationException}. The implementation ensures that the
# {@link ParserRuleContext#exception} field is set for all parse tree nodes
# that were not completed prior to encountering the error.
#
# <p>
# This error strategy is useful in the following scenarios.</p>
#
# <ul>
# <li><strong>Two-stage parsing:</strong> This error strategy allows the first
# stage of two-stage parsing to immediately terminate if an error is
# encountered, and immediately fall back to the second stage. In addition to
# avoiding wasted work by attempting to recover from errors here, the empty
# implementation of {@link BailErrorStrategy#sync} improves the performance of
# the first stage.</li>
# <li><strong>Silent validation:</strong> When syntax errors are not being
# reported or logged, and the parse result is simply ignored if errors occur,
# the {@link BailErrorStrategy} avoids wasting work on recovering from errors
# when the result will be ignored either way.</li>
# </ul>
#
# <p>
# {@code myparser.setErrorHandler(new BailErrorStrategy());}</p>
#
# @see Parser#setErrorHandler(ANTLRErrorStrategy)
#
class BailErrorStrategy(DefaultErrorStrategy):
    """Error strategy that aborts the parse on the first syntax error.

    Instead of attempting any recovery, every error is re-raised wrapped
    in a ParseCancellationException; use Exception#getCause() to obtain
    the original RecognitionException.
    """

    def recover(self, recognizer:Parser, e:RecognitionException):
        # Record the exception on every context up the invocation chain,
        # then abort the parse with a cancellation wrapper so rule-level
        # catch blocks do not intercept it.
        node = recognizer._ctx
        while node is not None:
            node.exception = e
            node = node.parentCtx
        raise ParseCancellationException(e)

    def recoverInline(self, recognizer:Parser):
        # Never recover in place; treat the mismatch as fatal.
        self.recover(recognizer, InputMismatchException(recognizer))

    def sync(self, recognizer:Parser):
        # Intentionally a no-op: no pre-emptive re-synchronization.
        pass
# NOTE(review): removes the Parser name from the module namespace;
# in the code above it appears only in type annotations — confirm no
# other module imports it from here before relying on that.
del Parser
| |
"""
Module handling HTTP Requests and Connection Diagnostics
"""
from __future__ import print_function
from __future__ import absolute_import
import requests
import os
import six
import json
import logging
import platform
import xml.etree.ElementTree as ET
import warnings
import errno
# import io
from tempfile import TemporaryFile
# from datetime import datetime, timedelta
try:
# python 2
from urlparse import urlparse
from urllib import quote
except ImportError:
# python 3
from urllib.parse import urlparse
from urllib.parse import quote
from .utilities import (determine_hostname,
generate_machine_id,
write_unregistered_file,
write_registered_file,
os_release_info,
largest_spec_in_archive,
size_in_mb)
from .cert_auth import rhsmCertificate
from .constants import InsightsConstants as constants
from .url_cache import URLCache
from insights import package_info
from insights.util.canonical_facts import get_canonical_facts
# Suppress all warnings emitted via the warnings module for this process.
warnings.simplefilter('ignore')
APP_NAME = constants.app_name
# Custom log level used for wire-level request/response logging below.
NETWORK = constants.custom_network_log_level
logger = logging.getLogger(__name__)
"""
urllib3's logging is chatty
"""
# Quiet the connection-pool loggers for both the standalone urllib3
# package and the copy vendored inside requests.
URLLIB3_LOGGER = logging.getLogger('urllib3.connectionpool')
URLLIB3_LOGGER.setLevel(logging.WARNING)
URLLIB3_LOGGER = logging.getLogger('requests.packages.urllib3.connectionpool')
URLLIB3_LOGGER.setLevel(logging.WARNING)
# Exception types that mean the request itself never completed.
REQUEST_FAILED_EXCEPTIONS = (requests.ConnectionError, requests.Timeout)
# TODO: Document this, or turn it into a real option
if os.environ.get('INSIGHTS_DEBUG_HTTP'):
    # Enable verbose wire-level HTTP debugging.  'httplib' exists only on
    # Python 2; on Python 3 the same module is 'http.client'.  The module
    # elsewhere in this file already supports both interpreters (see the
    # urlparse/urllib.parse fallback), so mirror that pattern here.
    try:
        import httplib
    except ImportError:
        import http.client as httplib
    httplib.HTTPConnection.debuglevel = 1
    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    requests_log = logging.getLogger("urllib3")
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True
def _host_not_found():
raise Exception("Error: failed to find host with matching machine-id. Run insights-client --status to check registration status")
def _api_request_failed(exception, message='The Insights API could not be reached.'):
    """Log the exception from a failed API request; log *message* too unless falsy."""
    logger.error(exception)
    if not message:
        return
    logger.error(message)
class InsightsConnection(object):
"""
Helper class to manage details about the connection
"""
    def __init__(self, config):
        """
        Capture connection settings (auth, URLs, cert verification, proxy)
        from *config* and open a requests Session.
        """
        self.config = config
        self.username = self.config.username
        self.password = self.config.password
        # workaround while we support both legacy and plat APIs
        self.cert_verify = self.config.cert_verify
        if self.cert_verify is None:
            # Default to the bundled CA PEM when nothing is configured.
            # if self.config.legacy_upload:
            self.cert_verify = os.path.join(
                constants.default_conf_dir,
                'cert-api.access.redhat.com.pem')
            # else:
            #     self.cert_verify = True
        else:
            # Config values may arrive as the strings 'true'/'false';
            # coerce those to real booleans.  Any other string is kept
            # as-is (treated as a CA bundle path by requests).
            if isinstance(self.cert_verify, six.string_types):
                if self.cert_verify.lower() == 'false':
                    self.cert_verify = False
                elif self.cert_verify.lower() == 'true':
                    self.cert_verify = True
        protocol = "https://"
        self.auto_config = self.config.auto_config
        # workaround while we support both legacy and plat APIs
        # hack to "guess" the correct base URL if autoconfig off +
        # no base_url in config
        if self.config.base_url is None:
            if self.config.legacy_upload:
                self.base_url = protocol + constants.legacy_base_url
            else:
                self.base_url = protocol + constants.base_url
        else:
            self.base_url = protocol + self.config.base_url
        # end hack. in the future, make cloud.redhat.com the default
        self.upload_url = self.config.upload_url
        if self.upload_url is None:
            if self.config.legacy_upload:
                self.upload_url = self.base_url + "/uploads"
            else:
                self.upload_url = self.base_url + '/ingress/v1/upload'
        self.api_url = self.base_url
        self.branch_info_url = self.config.branch_info_url
        if self.branch_info_url is None:
            # workaround for a workaround for a workaround
            base_url_base = self.base_url.split('/platform')[0]
            self.branch_info_url = base_url_base + '/v1/branch_info'
        self.inventory_url = self.api_url + "/inventory/v1"
        self.authmethod = self.config.authmethod
        # Normalize falsy systemid values (e.g. empty string) to None.
        self.systemid = self.config.systemid or None
        # get_proxies() must run before _init_session(): the session setup
        # reads self.proxies and self.proxy_auth.
        self.get_proxies()
        self.session = self._init_session()
    def _init_session(self):
        """
        Set up the session, auth is handled here
        Configures headers, BASIC or CERT auth, TLS verification and
        proxies, then (if proxy auth is needed) reaches into urllib3
        internals to install the Proxy-Authorization header.
        """
        session = requests.Session()
        session.headers = {'User-Agent': self.user_agent,
                           'Accept': 'application/json'}
        if self.systemid is not None:
            session.headers.update({'systemid': self.systemid})
        if self.authmethod == "BASIC":
            session.auth = (self.username, self.password)
        elif self.authmethod == "CERT":
            cert = rhsmCertificate.certpath()
            key = rhsmCertificate.keypath()
            if rhsmCertificate.exists():
                session.cert = (cert, key)
            else:
                # Session continues without a client cert; requests will
                # fail later with an auth error.
                logger.error('ERROR: Certificates not found.')
        session.verify = self.cert_verify
        session.proxies = self.proxies
        # Ignore environment proxy/CA variables; config is authoritative.
        session.trust_env = False
        if self.proxy_auth:
            # HACKY
            try:
                # Need to make a request that will fail to get proxies set up
                logger.log(NETWORK, "GET %s", self.base_url)
                session.request(
                    "GET", self.base_url, timeout=self.config.http_timeout)
            except requests.ConnectionError:
                pass
            # Major hack, requests/urllib3 does not make access to
            # proxy_headers easy
            proxy_mgr = session.adapters['https://'].proxy_manager[self.proxies['https']]
            auth_map = {'Proxy-Authorization': self.proxy_auth}
            proxy_mgr.proxy_headers = auth_map
            proxy_mgr.connection_pool_kw['_proxy_headers'] = auth_map
            conns = proxy_mgr.pools._container
            for conn in conns:
                connection = conns[conn]
                connection.proxy_headers = auth_map
        return session
def _http_request(self, url, method, log_response_text=True, **kwargs):
'''
Perform an HTTP request, net logging, and error handling
Parameters
url - URL to perform the request against
method - HTTP method, used for logging
kwargs - Rest of the args to pass to the request function
Returns
HTTP response object
'''
logger.log(NETWORK, "%s %s", method, url)
res = self.session.request(url=url, method=method, timeout=self.config.http_timeout, **kwargs)
logger.log(NETWORK, "HTTP Status: %d %s", res.status_code, res.reason)
if log_response_text or res.status_code != 200:
logger.log(NETWORK, "HTTP Response Text: %s", res.text)
return res
    def get(self, url, **kwargs):
        # Thin wrapper over _http_request for GET.
        return self._http_request(url, 'GET', **kwargs)
    def post(self, url, **kwargs):
        # Thin wrapper over _http_request for POST.
        return self._http_request(url, 'POST', **kwargs)
    def put(self, url, **kwargs):
        # Thin wrapper over _http_request for PUT.
        return self._http_request(url, 'PUT', **kwargs)
    def patch(self, url, **kwargs):
        # Thin wrapper over _http_request for PATCH.
        return self._http_request(url, 'PATCH', **kwargs)
    def delete(self, url, **kwargs):
        # Thin wrapper over _http_request for DELETE.
        return self._http_request(url, 'DELETE', **kwargs)
    @property
    def user_agent(self):
        """
        Generates and returns a string suitable for use as a request user-agent
        Combines client/core versions, requests version, OS release,
        Python and kernel versions, and the parent process name.
        """
        import pkg_resources
        core_version = "insights-core"
        pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse(core_version))
        if pkg is not None:
            core_version = "%s %s" % (pkg.project_name, pkg.version)
        else:
            # Installed metadata unavailable; fall back to the package info
            # shipped with the insights module itself.
            core_version = "Core %s" % package_info["VERSION"]
        try:
            from insights_client import constants as insights_client_constants
            client_version = "insights-client/{0}".format(insights_client_constants.InsightsConstants.version)
        except ImportError:
            client_version = "insights-client"
        if os.path.isfile(constants.ppidfile):
            with open(constants.ppidfile, 'r') as f:
                parent_process = f.read()
        else:
            parent_process = "unknown"
        # NOTE(review): if the 'requests' distribution metadata is missing,
        # requests_version stays None and the UA string will contain the
        # literal 'None' — confirm that is acceptable.
        requests_version = None
        pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse("requests"))
        if pkg is not None:
            requests_version = "%s %s" % (pkg.project_name, pkg.version)
        python_version = "%s %s" % (platform.python_implementation(), platform.python_version())
        os_family, os_release = os_release_info()
        kernel_version = "%s %s" % (platform.system(), platform.release())
        ua = "{client_version} ({core_version}; {requests_version}) {os_family} {os_release} ({python_version}; {kernel_version}); {parent_process}".format(
            client_version=client_version,
            core_version=core_version,
            parent_process=parent_process,
            python_version=python_version,
            os_family=os_family,
            os_release=os_release,
            kernel_version=kernel_version,
            requests_version=requests_version,
        )
        return ua
def get_proxies(self):
"""
Determine proxy configuration
"""
# Get proxy from ENV or Config
proxies = None
proxy_auth = None
no_proxy = os.environ.get('NO_PROXY')
logger.debug("NO PROXY: %s", no_proxy)
# CONF PROXY TAKES PRECEDENCE OVER ENV PROXY
conf_proxy = self.config.proxy
if ((conf_proxy is not None and
conf_proxy.lower() != 'None'.lower() and
conf_proxy != "")):
if '@' in conf_proxy:
scheme = conf_proxy.split(':')[0] + '://'
logger.debug("Proxy Scheme: %s", scheme)
location = conf_proxy.split('@')[1]
logger.debug("Proxy Location: %s", location)
username = conf_proxy.split(
'@')[0].split(':')[1].replace('/', '')
logger.debug("Proxy User: %s", username)
password = conf_proxy.split('@')[0].split(':')[2]
proxy_auth = requests.auth._basic_auth_str(username, password)
conf_proxy = scheme + location
logger.debug("CONF Proxy: %s", conf_proxy)
proxies = {"https": conf_proxy}
# HANDLE NO PROXY CONF PROXY EXCEPTION VERBIAGE
if no_proxy and conf_proxy:
logger.debug("You have environment variable NO_PROXY set "
"as well as 'proxy' set in your configuration file. "
"NO_PROXY environment variable will be ignored.")
# IF NO CONF PROXY, GET ENV PROXY AND NO PROXY
if proxies is None:
env_proxy = os.environ.get('HTTPS_PROXY')
if env_proxy:
if '@' in env_proxy:
scheme = env_proxy.split(':')[0] + '://'
logger.debug("Proxy Scheme: %s", scheme)
location = env_proxy.split('@')[1]
logger.debug("Proxy Location: %s", location)
username = env_proxy.split('@')[0].split(':')[1].replace('/', '')
logger.debug("Proxy User: %s", username)
password = env_proxy.split('@')[0].split(':')[2]
proxy_auth = requests.auth._basic_auth_str(username, password)
env_proxy = scheme + location
logger.debug("ENV Proxy: %s", env_proxy)
proxies = {"https": env_proxy}
if no_proxy:
insights_service_host = urlparse(self.base_url).hostname
logger.debug('Found NO_PROXY set. Checking NO_PROXY %s against base URL %s.', no_proxy, insights_service_host)
for no_proxy_host in no_proxy.split(','):
logger.debug('Checking %s against %s', no_proxy_host, insights_service_host)
if no_proxy_host == '*':
proxies = None
proxy_auth = None
logger.debug('Found NO_PROXY asterisk(*) wildcard, disabling all proxies.')
break
elif no_proxy_host.startswith('.') or no_proxy_host.startswith('*'):
if insights_service_host.endswith(no_proxy_host.replace('*', '')):
proxies = None
proxy_auth = None
logger.debug('Found NO_PROXY range %s matching %s', no_proxy_host, insights_service_host)
break
elif no_proxy_host == insights_service_host:
proxies = None
proxy_auth = None
logger.debug('Found NO_PROXY %s exactly matching %s', no_proxy_host, insights_service_host)
break
self.proxies = proxies
self.proxy_auth = proxy_auth
    def _legacy_test_urls(self, url, method):
        """
        Actually test the url
        Tries the URL's own path plus a set of legacy fallback paths;
        returns True on the first 200/201, False on any other HTTP status,
        and re-raises the last connection error if every path failed.
        """
        # tell the api we're just testing the URL
        test_flag = {'test': 'test'}
        url = urlparse(url)
        test_url = url.scheme + "://" + url.netloc
        last_ex = None
        paths = (url.path + '/', '', '/r', '/r/insights')
        for ext in paths:
            try:
                logger.log(NETWORK, "Testing: %s", test_url + ext)
                # NOTE(review): if method is neither POST nor GET, test_req
                # is never bound and the status check below raises
                # UnboundLocalError — callers only pass 'POST'/'GET'.
                if method == "POST":
                    test_req = self.post(test_url + ext, data=test_flag)
                elif method == "GET":
                    test_req = self.get(test_url + ext)
                # Strata returns 405 on a GET sometimes, this isn't a big deal
                if test_req.status_code in (200, 201):
                    logger.info(
                        "Successfully connected to: %s", test_url + ext)
                    return True
                else:
                    # A reachable server that answered non-2xx ends the test
                    # immediately; remaining fallback paths are not tried.
                    logger.info("Connection failed")
                    return False
            except requests.ConnectionError as exc:
                last_ex = exc
                logger.error(
                    "Could not successfully connect to: %s", test_url + ext)
                print(exc)
        if last_ex:
            raise last_ex
def _test_urls(self, url, method):
'''
Test a URL
'''
if self.config.legacy_upload:
return self._legacy_test_urls(url, method)
try:
logger.log(NETWORK, 'Testing %s', url)
if method == 'POST':
test_tar = TemporaryFile(mode='rb', suffix='.tar.gz')
test_files = {
'file': ('test.tar.gz', test_tar, 'application/vnd.redhat.advisor.collection+tgz'),
'metadata': '{\"test\": \"test\"}'
}
test_req = self.post(url, files=test_files)
elif method == "GET":
test_req = self.get(url)
if test_req.status_code in (200, 201, 202):
logger.info(
"Successfully connected to: %s", url)
return True
else:
logger.info("Connection failed")
return False
except requests.ConnectionError as exc:
last_ex = exc
logger.error(
"Could not successfully connect to: %s", url)
print(exc)
if last_ex:
raise last_ex
def test_connection(self, rc=0):
"""
Test connection to Red Hat
"""
logger.debug("Proxy config: %s", self.proxies)
try:
logger.info("=== Begin Upload URL Connection Test ===")
upload_success = self._test_urls(self.upload_url, "POST")
logger.info("=== End Upload URL Connection Test: %s ===\n",
"SUCCESS" if upload_success else "FAILURE")
logger.info("=== Begin API URL Connection Test ===")
if self.config.legacy_upload:
api_success = self._test_urls(self.base_url, "GET")
else:
api_success = self._test_urls(self.base_url + '/apicast-tests/ping', 'GET')
logger.info("=== End API URL Connection Test: %s ===\n",
"SUCCESS" if api_success else "FAILURE")
if upload_success and api_success:
logger.info("Connectivity tests completed successfully")
print("See %s for more details." % self.config.logging_file)
else:
logger.info("Connectivity tests completed with some errors")
print("See %s for more details." % self.config.logging_file)
rc = 1
except requests.ConnectionError as exc:
print(exc)
logger.error('Connectivity test failed! '
'Please check your network configuration')
print('Additional information may be in %s' % self.config.logging_file)
return 1
return rc
def handle_fail_rcs(self, req):
"""
Bail out if we get a 401 and leave a message
"""
# attempt to read the HTTP response JSON message
try:
logger.log(NETWORK, "HTTP Response Message: %s", req.json()["message"])
except:
logger.debug("No HTTP Response message present.")
# handle specific status codes
if req.status_code >= 400:
logger.debug("Debug Information:\nHTTP Status Code: %s",
req.status_code)
logger.debug("HTTP Status Text: %s", req.reason)
if req.status_code == 401:
logger.error("Please ensure that the system is registered "
"with RHSM for CERT auth, or that correct "
"credentials are set in %s for BASIC auth.", self.config.conf)
logger.log(NETWORK, "HTTP Response Text: %s", req.text)
if req.status_code == 402:
# failed registration because of entitlement limit hit
logger.debug('Registration failed by 402 error.')
try:
logger.error(req.json()["message"])
except LookupError:
logger.error("Got 402 but no message")
logger.log(NETWORK, "HTTP Response Text: %s", req.text)
except:
logger.error("Got 402 but no message")
logger.log(NETWORK, "HTTP Response Text: %s", req.text)
if req.status_code == 403 and self.auto_config:
# Insights disabled in satellite
rhsm_hostname = urlparse(self.base_url).hostname
if (rhsm_hostname != 'subscription.rhn.redhat.com' and
rhsm_hostname != 'subscription.rhsm.redhat.com'):
logger.error('Please enable Insights on Satellite server '
'%s to continue.', rhsm_hostname)
if req.status_code == 412:
try:
unreg_date = req.json()["unregistered_at"]
logger.error(req.json()["message"])
write_unregistered_file(unreg_date)
except LookupError:
unreg_date = "412, but no unreg_date or message"
logger.log(NETWORK, "HTTP Response Text: %s", req.text)
except:
unreg_date = "412, but no unreg_date or message"
logger.log(NETWORK, "HTTP Response Text: %s", req.text)
if req.status_code == 413:
logger.error('Archive is too large to upload.')
if req.status_code == 415:
logger.error('Invalid content-type.')
return True
return False
def get_satellite5_info(self, branch_info):
"""
Get remote_leaf for Satellite 5 Managed box
"""
logger.debug(
"Remote branch not -1 but remote leaf is -1, must be Satellite 5")
if os.path.isfile('/etc/sysconfig/rhn/systemid'):
logger.debug("Found systemid file")
sat5_conf = ET.parse('/etc/sysconfig/rhn/systemid').getroot()
leaf_id = None
for member in sat5_conf.getiterator('member'):
if member.find('name').text == 'system_id':
logger.debug("Found member 'system_id'")
leaf_id = member.find('value').find(
'string').text.split('ID-')[1]
logger.debug("Found leaf id: %s", leaf_id)
branch_info['remote_leaf'] = leaf_id
if leaf_id is None:
logger.error("Could not determine leaf_id! Exiting!")
return False
    def get_branch_info(self):
        """
        Retrieve branch_info from Satellite Server
        Returns the parsed branch_info dict (also stored on
        self.config.branch_info), or False on a non-200 response.
        """
        # branch_info = None
        # if os.path.exists(constants.cached_branch_info):
        #     # use cached branch info file if less than 5 minutes old
        #     # (failsafe, should be deleted at end of client run normally)
        #     logger.debug(u'Reading branch info from cached file.')
        #     ctime = datetime.utcfromtimestamp(
        #         os.path.getctime(constants.cached_branch_info))
        #     if datetime.utcnow() < (ctime + timedelta(minutes=5)):
        #         with io.open(constants.cached_branch_info, encoding='utf8', mode='r') as f:
        #             branch_info = json.load(f)
        #         return branch_info
        #     else:
        #         logger.debug(u'Cached branch info is older than 5 minutes.')
        logger.debug(u'Obtaining branch information from %s',
                     self.branch_info_url)
        response = self.get(self.branch_info_url)
        if response.status_code != 200:
            logger.debug("There was an error obtaining branch information.")
            logger.debug(u'Bad status from server: %s', response.status_code)
            logger.debug("Assuming default branch information %s" % constants.default_branch_info)
            return False
        branch_info = response.json()
        logger.debug(u'Branch information: %s', json.dumps(branch_info))
        # Determine if we are connected to Satellite 5
        if ((branch_info[u'remote_branch'] != -1 and
             branch_info[u'remote_leaf'] == -1)):
            self.get_satellite5_info(branch_info)
        # logger.debug(u'Saving branch info to file.')
        # with io.open(constants.cached_branch_info, encoding='utf8', mode='w') as f:
        #     # json.dump is broke in py2 so use dumps
        #     bi_str = json.dumps(branch_info, ensure_ascii=False)
        #     f.write(bi_str)
        self.config.branch_info = branch_info
        return branch_info
# -LEGACY-
def create_system(self, new_machine_id=False):
"""
Create the machine via the API
"""
client_hostname = determine_hostname()
machine_id = generate_machine_id(new_machine_id)
branch_info = self.config.branch_info
if not branch_info:
return False
remote_branch = branch_info['remote_branch']
remote_leaf = branch_info['remote_leaf']
data = {'machine_id': machine_id,
'remote_branch': remote_branch,
'remote_leaf': remote_leaf,
'hostname': client_hostname}
if self.config.display_name is not None:
data['display_name'] = self.config.display_name
data = json.dumps(data)
post_system_url = self.api_url + '/v1/systems'
logger.debug("POST System: %s", post_system_url)
logger.debug(data)
return self.post(post_system_url,
headers={'Content-Type': 'application/json'},
data=data)
    # -LEGACY-
    def group_systems(self, group_name, systems):
        """
        Adds an array of systems to specified group
        Args:
            group_name: Display name of group
            systems: Array of {'machine_id': machine_id}
        """
        api_group_id = None
        headers = {'Content-Type': 'application/json'}
        group_path = self.api_url + '/v1/groups'
        group_get_path = group_path + ('?display_name=%s' % quote(group_name))
        get_group = self.get(group_get_path)
        if get_group.status_code == 200:
            api_group_id = get_group.json()['id']
        if get_group.status_code == 404:
            # Group does not exist, POST to create
            data = json.dumps({'display_name': group_name})
            post_group = self.post(group_path,
                                   headers=headers,
                                   data=data)
            self.handle_fail_rcs(post_group)
            api_group_id = post_group.json()['id']
        # NOTE(review): for any other GET status (e.g. 500) api_group_id
        # stays None and the PUT below targets '.../None/systems' — confirm
        # whether that is the intended behavior.
        data = json.dumps(systems)
        self.put(group_path +
                 ('/%s/systems' % api_group_id),
                 headers=headers,
                 data=data)
# -LEGACY-
# Keeping this function around because it's not private and I don't know if anything else uses it
def do_group(self):
"""
Do grouping on register
"""
group_id = self.config.group
systems = {'machine_id': generate_machine_id()}
self.group_systems(group_id, systems)
# -LEGACY-
def _legacy_api_registration_check(self):
'''
Check registration status through API
'''
logger.debug('Checking registration status...')
machine_id = generate_machine_id()
try:
url = self.api_url + '/v1/systems/' + machine_id
res = self.get(url)
except requests.ConnectionError:
# can't connect, run connection test
logger.error('Connection timed out. Running connection test...')
self.test_connection()
return False
# had to do a quick bugfix changing this around,
# which makes the None-False-True dichotomy seem weird
# TODO: reconsider what gets returned, probably this:
# True for registered
# False for unregistered
# None for system 404
self.handle_fail_rcs(res)
try:
# check the 'unregistered_at' key of the response
unreg_status = json.loads(res.content).get('unregistered_at', 'undefined')
# set the global account number
self.config.account_number = json.loads(res.content).get('account_number', 'undefined')
except ValueError:
# bad response, no json object
return False
if unreg_status == 'undefined':
# key not found, machine not yet registered
return None
elif unreg_status is None:
# unregistered_at = null, means this machine IS registered
return True
else:
# machine has been unregistered, this is a timestamp
return unreg_status
def _fetch_system_by_machine_id(self):
'''
Get a system by machine ID
Returns
dict system exists in inventory
False system does not exist in inventory
None error connection or parsing response
'''
machine_id = generate_machine_id()
try:
# [circus music]
if self.config.legacy_upload:
url = self.base_url + '/platform/inventory/v1/hosts?insights_id=' + machine_id
else:
url = self.inventory_url + '/hosts?insights_id=' + machine_id
res = self.get(url)
except REQUEST_FAILED_EXCEPTIONS as e:
_api_request_failed(e)
return None
try:
if (self.handle_fail_rcs(res)):
return None
res_json = json.loads(res.content)
except ValueError as e:
logger.error(e)
logger.error('Could not parse response body.')
return None
if res_json['total'] == 0:
logger.debug('No hosts found with machine ID: %s', machine_id)
return False
return res_json['results']
def api_registration_check(self):
'''
Reach out to the inventory API to check
whether a machine exists.
Returns
True system exists in inventory
False system does not exist in inventory
None error connection or parsing response
'''
if self.config.legacy_upload:
return self._legacy_api_registration_check()
logger.debug('Checking registration status...')
results = self._fetch_system_by_machine_id()
if not results:
return results
logger.debug('System found.')
logger.debug('Machine ID: %s', results[0]['insights_id'])
logger.debug('Inventory ID: %s', results[0]['id'])
return True
# -LEGACY-
def _legacy_unregister(self):
"""
Unregister this system from the insights service
"""
machine_id = generate_machine_id()
try:
logger.debug("Unregistering %s", machine_id)
url = self.api_url + "/v1/systems/" + machine_id
self.delete(url)
logger.info(
"Successfully unregistered from the Red Hat Insights Service")
return True
except requests.ConnectionError as e:
logger.debug(e)
logger.error("Could not unregister this system")
return False
def unregister(self):
"""
Unregister this system from the insights service
"""
if self.config.legacy_upload:
return self._legacy_unregister()
results = self._fetch_system_by_machine_id()
if not results:
logger.info('This host could not be found.')
return False
try:
logger.debug("Unregistering host...")
url = self.inventory_url + "/hosts/" + results[0]['id']
response = self.delete(url)
response.raise_for_status()
logger.info(
"Successfully unregistered from the Red Hat Insights Service")
return True
except (requests.ConnectionError, requests.Timeout, requests.HTTPError) as e:
logger.debug(e)
logger.error("Could not unregister this system")
return False
# -LEGACY-
def register(self):
"""
Register this machine
"""
client_hostname = determine_hostname()
# This will undo a blacklist
logger.debug("API: Create system")
system = self.create_system(new_machine_id=False)
if system is False:
return ('Could not reach the Insights service to register.', '', '', '')
# If we get a 409, we know we need to generate a new machine-id
if system.status_code == 409:
system = self.create_system(new_machine_id=True)
self.handle_fail_rcs(system)
logger.debug("System: %s", system.json())
message = system.headers.get("x-rh-message", "")
# Do grouping
if self.config.group is not None:
self.do_group()
# Display registration success messasge to STDOUT and logs
if system.status_code == 201:
try:
system_json = system.json()
machine_id = system_json["machine_id"]
account_number = system_json["account_number"]
logger.info("You successfully registered %s to account %s." % (machine_id, account_number))
except:
logger.debug('Received invalid JSON on system registration.')
logger.debug('API still indicates valid registration with 201 status code.')
logger.debug(system)
logger.debug(system.json())
if self.config.group is not None:
return (message, client_hostname, self.config.group, self.config.display_name)
elif self.config.display_name is not None:
return (message, client_hostname, "None", self.config.display_name)
else:
return (message, client_hostname, "None", "")
def _archive_too_big(self, archive_file):
'''
Some helpful messaging for when the archive is too large for ingress
'''
archive_filesize = size_in_mb(
os.stat(archive_file).st_size)
logger.info("Archive is {fsize} MB which is larger than the maximum allowed size of {flimit} MB.".format(
fsize=archive_filesize, flimit=constants.archive_filesize_max))
if not self.config.core_collect:
logger.error("Cannot estimate the spec with largest filesize because core collection is not enabled. "
"Enable core collection by setting core_collect=True in %s, and attempt the upload again.", self.config.conf)
return
biggest_file = largest_spec_in_archive(archive_file)
logger.info("The largest file in the archive is %s at %s MB.", biggest_file[0], size_in_mb(biggest_file[1]))
logger.info("Please add the following spec to /etc/insights-client/file-redaction.yaml."
"According to the documentation https://access.redhat.com/articles/4511681\n\n"
"**** /etc/insights-client/file-redaction.yaml ****\n"
"# file-redaction.yaml\n"
"# Omit entire output of files\n"
"# Files can be specified either by full filename or\n"
"# by the 'symbolic_name' listed in .cache.json\n"
"files:\n"
"- %s \n**** ****", biggest_file[2])
# -LEGACY-
def _legacy_upload_archive(self, data_collected, duration):
'''
Do an HTTPS upload of the archive
'''
file_name = os.path.basename(data_collected)
try:
from insights.contrib import magic
m = magic.open(magic.MAGIC_MIME)
m.load()
mime_type = m.file(data_collected)
except ImportError:
magic = None
logger.debug('python-magic not installed, using backup function...')
from .utilities import magic_plan_b
mime_type = magic_plan_b(data_collected)
files = {
'file': (file_name, open(data_collected, 'rb'), mime_type)}
upload_url = self.upload_url + '/' + generate_machine_id()
logger.debug("Uploading %s to %s", data_collected, upload_url)
headers = {'x-rh-collection-time': str(duration)}
upload = self.post(upload_url, files=files, headers=headers)
if upload.status_code in (200, 201):
the_json = json.loads(upload.text)
else:
logger.error("Upload archive failed with status code %s", upload.status_code)
if upload.status_code == 413:
# let the user know what file is bloating the archive
self._archive_too_big(data_collected)
return upload
try:
self.config.account_number = the_json["upload"]["account_number"]
except:
self.config.account_number = None
logger.debug("Upload duration: %s", upload.elapsed)
return upload
def upload_archive(self, data_collected, content_type, duration=None):
"""
Do an HTTPS Upload of the archive
"""
if self.config.legacy_upload:
return self._legacy_upload_archive(data_collected, duration)
file_name = os.path.basename(data_collected)
upload_url = self.upload_url
c_facts = {}
try:
c_facts = get_canonical_facts()
except Exception as e:
logger.debug('Error getting canonical facts: %s', e)
if self.config.display_name:
# add display_name to canonical facts
c_facts['display_name'] = self.config.display_name
if self.config.ansible_host:
# add ansible_host to canonical facts
c_facts['ansible_host'] = self.config.ansible_host
if self.config.branch_info:
c_facts["branch_info"] = self.config.branch_info
c_facts["satellite_id"] = self.config.branch_info["remote_leaf"]
c_facts = json.dumps(c_facts)
logger.debug('Canonical facts collected:\n%s', c_facts)
files = {
'file': (file_name, open(data_collected, 'rb'), content_type),
'metadata': c_facts
}
logger.debug('content-type: %s', content_type)
logger.debug("Uploading %s to %s", data_collected, upload_url)
upload = self.post(upload_url, files=files, headers={})
logger.debug('Request ID: %s', upload.headers.get('x-rh-insights-request-id', None))
if upload.status_code in (200, 202):
# 202 from platform, no json response
logger.debug(upload.text)
# upload = registration on platform
try:
write_registered_file()
except OSError as e:
if e.errno == errno.EACCES and os.getuid() != 0:
# if permissions error as non-root, ignore
pass
else:
logger.error('Could not update local registration record: %s', str(e))
else:
logger.debug(
"Upload archive failed with status code %s",
upload.status_code)
if upload.status_code == 413:
# let the user know what file is bloating the archive
self._archive_too_big(data_collected)
return upload
logger.debug("Upload duration: %s", upload.elapsed)
return upload
# -LEGACY-
def _legacy_set_display_name(self, display_name):
machine_id = generate_machine_id()
try:
url = self.api_url + '/v1/systems/' + machine_id
res = self.get(url)
old_display_name = json.loads(res.content).get('display_name', None)
if display_name == old_display_name:
logger.debug('Display name unchanged: %s', old_display_name)
return True
res = self.put(url,
headers={'Content-Type': 'application/json'},
data=json.dumps(
{'display_name': display_name}))
if res.status_code == 200:
logger.info('System display name changed from %s to %s',
old_display_name,
display_name)
return True
elif res.status_code == 404:
logger.error('System not found. '
'Please run insights-client --register.')
return False
else:
logger.error('Unable to set display name: %s %s',
res.status_code, res.text)
return False
except REQUEST_FAILED_EXCEPTIONS + (ValueError,) as e:
_api_request_failed(e, None)
# can't connect, run connection test
return False
def set_display_name(self, display_name):
'''
Set display name of a system independently of upload.
'''
if self.config.legacy_upload:
return self._legacy_set_display_name(display_name)
system = self._fetch_system_by_machine_id()
if not system:
return system
inventory_id = system[0]['id']
req_url = self.inventory_url + '/hosts/' + inventory_id
try:
res = self.patch(req_url, json={'display_name': display_name})
except REQUEST_FAILED_EXCEPTIONS as e:
_api_request_failed(e)
return False
if (self.handle_fail_rcs(res)):
logger.error('Could not update display name.')
return False
logger.info('Display name updated to ' + display_name + '.')
return True
def set_ansible_host(self, ansible_host):
'''
Set Ansible hostname of a system independently of upload.
'''
system = self._fetch_system_by_machine_id()
if not system:
return system
inventory_id = system[0]['id']
req_url = self.inventory_url + '/hosts/' + inventory_id
try:
res = self.patch(req_url, json={'ansible_host': ansible_host})
except REQUEST_FAILED_EXCEPTIONS as e:
_api_request_failed(e)
return False
if (self.handle_fail_rcs(res)):
logger.error('Could not update Ansible hostname.')
return False
logger.info('Ansible hostname updated to ' + ansible_host + '.')
return True
def get_diagnosis(self, remediation_id=None):
'''
Reach out to the platform and fetch a diagnosis.
Spirtual successor to --to-json from the old client.
'''
# this uses machine id as identifier instead of inventory id
diag_url = self.base_url + '/remediations/v1/diagnosis/' + generate_machine_id()
params = {}
if remediation_id:
# validate this?
params['remediation'] = remediation_id
try:
res = self.get(diag_url, params=params)
except (requests.ConnectionError, requests.Timeout) as e:
_api_request_failed(e)
return False
if (self.handle_fail_rcs(res)):
logger.error('Unable to get diagnosis data: %s %s',
res.status_code, res.text)
return None
return res.json()
    def _cached_get(self, url):
        '''
        Submits a GET request to @url, caching the result, and returning
        the response body, if any. It makes the response status code opaque
        to the caller.

        Returns: bytes
        '''
        # NOTE(review): fixed cache path — assumes the client can write
        # /var/cache/insights (normally runs as root); verify for other callers.
        cache = URLCache("/var/cache/insights/cache.dat")

        headers = {}
        item = cache.get(url)
        if item is not None:
            # We have a cached entry: ask the server to revalidate it so a
            # 304 Not Modified can spare us the body transfer.
            headers["If-None-Match"] = item.etag

        res = self.get(url, headers=headers)

        if res.status_code in [requests.codes.OK, requests.codes.NOT_MODIFIED]:
            if res.status_code == requests.codes.OK:
                # Fresh 200: store the new body only when the server supplied
                # an ETag and a non-empty payload, then persist the cache.
                if "ETag" in res.headers and len(res.content) > 0:
                    cache.set(url, res.headers["ETag"], res.content)
                    cache.save()
                # Re-read so `item` reflects what (if anything) is now cached.
                item = cache.get(url)
            # Serve the cached body when available (always the case on 304);
            # otherwise fall back to the live response body.
            if item is None:
                return res.content
            else:
                return item.content
        else:
            # Any other status code is hidden from the caller: "no body".
            return None
def get_advisor_report(self):
'''
Retrieve advisor report
'''
url = self.inventory_url + "/hosts?insights_id=%s" % generate_machine_id()
content = self._cached_get(url)
if content is None:
return None
host_details = json.loads(content)
if host_details["total"] < 1:
_host_not_found()
if host_details["total"] > 1:
raise Exception("Error: multiple hosts detected (insights_id = %s)" % generate_machine_id())
if not os.path.exists("/var/lib/insights"):
os.makedirs("/var/lib/insights", mode=0o755)
with open("/var/lib/insights/host-details.json", mode="w+b") as f:
f.write(content)
logger.debug("Wrote \"/var/lib/insights/host-details.json\"")
host_id = host_details["results"][0]["id"]
url = self.base_url + "/insights/v1/system/%s/reports/" % host_id
content = self._cached_get(url)
if content is None:
return None
with open("/var/lib/insights/insights-details.json", mode="w+b") as f:
f.write(content)
logger.debug("Wrote \"/var/lib/insights/insights-details.json\"")
return json.loads(content)
def checkin(self):
'''
Sends an ultralight check-in request containing only the Canonical Facts.
'''
logger.info("Checking in...")
try:
canonical_facts = get_canonical_facts()
except Exception as e:
logger.debug('Error getting canonical facts: %s', e)
logger.debug('Falling back to only machine ID.')
insights_id = generate_machine_id()
canonical_facts = {"insights_id": str(insights_id)}
url = self.inventory_url + "/hosts/checkin"
logger.debug("Sending check-in request to %s with %s" % (url, canonical_facts))
try:
response = self.post(url, headers={"Content-Type": "application/json"}, data=json.dumps(canonical_facts))
# Change to POST when the API is fixed.
except REQUEST_FAILED_EXCEPTIONS as exception:
_api_request_failed(exception)
return None
logger.debug("Check-in response status code %d" % response.status_code)
if response.status_code == requests.codes.CREATED:
# Remove OK when the API is fixed.
logger.info("Successfully checked in!")
return True
elif response.status_code == requests.codes.NOT_FOUND:
# Remove BAD_REQUEST when the API is fixed.
_host_not_found()
else:
logger.debug("Check-in response body %s" % response.text)
raise RuntimeError("Unknown check-in API response")
| |
"""Implementation of :class:`Domain` class. """
from sympy.core import Basic, sympify
from sympy.polys.polyerrors import (
UnificationFailed,
CoercionFailed,
DomainError,
)
from sympy.polys.domains.groundtypes import python_factorial
class Domain(object):
    """Represents an abstract domain. """

    # Ground type of this domain's elements; `zero`/`one` are the additive
    # and multiplicative identities. Concrete subclasses override these.
    dtype = None
    zero = None
    one = None

    # Algebraic-structure flags (set by subclasses).
    has_Ring = False
    has_Field = False
    has_assoc_Ring = False
    has_assoc_Field = False

    # Identity flags for well-known domains (integers, rationals, finite
    # fields, complexes) and for composite polynomial/fraction domains.
    is_ZZ = False
    is_QQ = False
    is_FF = False
    is_CC = False

    is_Poly = False
    is_Frac = False

    is_Exact = True
    is_Numerical = False
    is_Algebraic = False
    is_Simple = False
    is_Composite = False

    has_CharacteristicZero = False

    is_EX = False

    # `rep` is the printable representation string; `alias` selects the
    # preferred `from_<alias>` conversion method used by `convert`.
    rep = None
    alias = None

    def __init__(self):
        # Abstract base class: only concrete subclasses may be instantiated.
        raise NotImplementedError

    def __str__(self):
        # `rep` is expected to be set by the subclass.
        return self.rep

    def __repr__(self):
        return str(self)

    def __hash__(self):
        return hash((self.__class__.__name__, self.dtype))

    def __call__(self, *args):
        """Construct an element of `self` domain from `args`. """
        return self.dtype(*args)

    def normal(self, *args):
        # Same as __call__; subclasses may override to normalize the element.
        return self.dtype(*args)

    def convert(K1, a, K0=None):
        """Convert an object `a` from `K0` to `K1`. """
        if K0 is not None:
            # Known source domain: dispatch to K1.from_<K0 alias or class>().
            # The from_* hooks return None (rather than raise) on failure.
            if K0.alias is not None:
                method = "from_" + K0.alias
            else:
                method = "from_" + K0.__class__.__name__

            _convert = getattr(K1, method)

            if _convert is not None:
                result = _convert(a, K0)

                if result is not None:
                    return result

            raise CoercionFailed("can't convert %s of type %s to %s" % (a, K0, K1))
        else:
            # Unknown source: probe a few cheap cases, then fall back to
            # sympification.
            try:
                if K1.of_type(a):
                    return a

                if type(a) is int:
                    return K1(a)

                if type(a) is long:  # Python 2 only; `long` does not exist on Python 3
                    return K1(a)

                if K1.is_Numerical and getattr(a, 'is_ground', False):
                    return K1.convert(a.LC())

                a = sympify(a)

                if isinstance(a, Basic):
                    return K1.from_sympy(a)
            except (TypeError, ValueError):
                pass

            raise CoercionFailed("can't convert %s to type %s" % (a, K1))

    def of_type(self, a):
        """Check if `a` is of type `dtype`. """
        # Exact type match against the type of `one` (not isinstance).
        return type(a) == type(self.one)

    def __contains__(self, a):
        """Check if `a` belongs to this domain. """
        try:
            self.convert(a)
        except CoercionFailed:
            return False

        return True

    def to_sympy(self, a):
        """Convert `a` to a SymPy object. """
        raise NotImplementedError

    def from_sympy(self, a):
        """Convert a SymPy object to `dtype`. """
        raise NotImplementedError

    # --- Conversion hooks used by `convert`. Each returns a converted
    # --- element or None when the conversion is unsupported.

    def from_FF_python(K1, a, K0):
        """Convert `ModularInteger(int)` to `dtype`. """
        return None

    def from_ZZ_python(K1, a, K0):
        """Convert a Python `int` object to `dtype`. """
        return None

    def from_QQ_python(K1, a, K0):
        """Convert a Python `Fraction` object to `dtype`. """
        return None

    def from_FF_sympy(K1, a, K0):
        """Convert `ModularInteger(Integer)` to `dtype`. """
        return None

    def from_ZZ_sympy(K1, a, K0):
        """Convert a SymPy `Integer` object to `dtype`. """
        return None

    def from_QQ_sympy(K1, a, K0):
        """Convert a SymPy `Rational` object to `dtype`. """
        return None

    def from_FF_gmpy(K1, a, K0):
        """Convert `ModularInteger(mpz)` to `dtype`. """
        return None

    def from_ZZ_gmpy(K1, a, K0):
        """Convert a GMPY `mpz` object to `dtype`. """
        return None

    def from_QQ_gmpy(K1, a, K0):
        """Convert a GMPY `mpq` object to `dtype`. """
        return None

    def from_RR_sympy(K1, a, K0):
        """Convert a SymPy `Float` object to `dtype`. """
        return None

    def from_RR_mpmath(K1, a, K0):
        """Convert a mpmath `mpf` object to `dtype`. """
        return None

    def from_AlgebraicField(K1, a, K0):
        """Convert a `ANP` object to `dtype`. """
        return None

    def from_PolynomialRing(K1, a, K0):
        """Convert a `DMP` object to `dtype`. """
        # Only constant polynomials can drop to the ground domain;
        # implicitly returns None otherwise.
        if a.degree() <= 0:
            return K1.convert(a.LC(), K0.dom)

    def from_FractionField(K1, a, K0):
        """Convert a `DMF` object to `dtype`. """
        return None

    def from_ExpressionDomain(K1, a, K0):
        """Convert a `EX` object to `dtype`. """
        return K1.from_sympy(a.ex)

    def unify(K0, K1, gens=None):
        """Returns a maximal domain containing `K0` and `K1`. """
        if gens is not None:
            # Refuse to unify when either composite domain already uses one
            # of the requested generators.
            if (K0.is_Composite and (set(K0.gens) & set(gens))) or (K1.is_Composite and (set(K1.gens) & set(gens))):
                raise UnificationFailed("can't unify %s with %s, given %s generators" % (K0, K1, tuple(gens)))

        if K0 == K1:
            return K0

        # Positive-characteristic domains only unify with an identical
        # modulus/ground pair, or with ZZ on the other side.
        if not K0.has_CharacteristicZero:
            if not K1.has_CharacteristicZero:
                if K0.mod == K1.mod and K0.dom == K1.dom:
                    return K0
            elif K1.is_ZZ:
                return K0

            raise UnificationFailed("can't unify %s with %s" % (K0, K1))

        if not K1.has_CharacteristicZero:
            if K0.is_ZZ:
                return K1
            else:
                raise UnificationFailed("can't unify %s with %s" % (K0, K1))

        # EX absorbs everything; inexact domains absorb exact ones.
        if K0.is_EX:
            return K0
        if K1.is_EX:
            return K1

        if not K0.is_Exact:
            return K0
        if not K1.is_Exact:
            return K1

        if K0.is_Composite:
            if K1.is_Composite:
                if K0.gens == K1.gens:
                    # Same generators: pick the side whose structure (field vs
                    # ring, ground field) dominates. Unmatched sub-branches
                    # fall through to the EX fallback at the bottom.
                    if K0.has_Field and K1.has_Field:
                        if K0.dom.has_Field:
                            return K0
                        else:
                            return K1
                    elif K0.has_Field:
                        if K0.dom == K1.dom:
                            return K0
                    elif K1.has_Field:
                        if K0.dom == K1.dom:
                            return K1
                    else:
                        if K0.dom.has_Field:
                            return K0
                        else:
                            return K1
                else:
                    # Different generators: merge them (sorted when the
                    # generators are orderable) and rebuild a composite.
                    gens = set(K0.gens + K1.gens)

                    try:
                        gens = sorted(gens)
                    except TypeError:
                        gens = list(gens)

                    if K0.has_Field and K1.has_Field:
                        if K0.dom.has_Field:
                            return K0.__class__(K0.dom, *gens)
                        else:
                            return K1.__class__(K1.dom, *gens)
                    elif K0.has_Field:
                        if K0.dom == K1.dom:
                            return K0.__class__(K0.dom, *gens)
                    elif K1.has_Field:
                        if K0.dom == K1.dom:
                            return K1.__class__(K1.dom, *gens)
                    else:
                        if K0.dom.has_Field:
                            return K0.__class__(K0.dom, *gens)
                        else:
                            return K1.__class__(K1.dom, *gens)
            elif K1.is_Algebraic:
                # Composite over an algebraic ground: unify the grounds.
                return K0.__class__(K1.unify(K0.dom), *K0.gens)
            else:
                if K0.has_Field:
                    if K0.dom == K1:
                        return K0
                else:
                    if K0.dom.has_Field:
                        return K0
                    else:
                        return K0.__class__(K1, *K0.gens)
        elif K0.is_Algebraic:
            if K1.is_Composite:
                return K1.__class__(K0.unify(K1.dom), *K1.gens)
            elif K1.is_Algebraic:
                raise NotImplementedError("unification of different algebraic extensions")
            elif K1.is_ZZ or K1.is_QQ:
                return K0
            else:
                raise UnificationFailed("can't unify %s with %s" % (K0, K1))
        else:
            if K1.is_Composite:
                if K1.has_Field:
                    if K0 == K1.dom:
                        return K1
                else:
                    if K1.dom.has_Field:
                        return K1
                    else:
                        return K1.__class__(K0, *K1.gens)
            elif K1.is_Algebraic:
                if K0.is_ZZ or K0.is_QQ:
                    return K1
                else:
                    raise UnificationFailed("can't unify %s with %s" % (K0, K1))
            else:
                if K0.has_Field:
                    return K0
                else:
                    return K1

        # Fallback for every branch above that did not return: the
        # expression domain can represent anything.
        from sympy.polys.domains import EX
        return EX

    def __eq__(self, other):
        """Returns `True` if two domains are equivalent. """
        # NOTE(review): assumes `other` has a `dtype` attribute; comparing
        # against arbitrary objects raises AttributeError.
        return self.dtype == other.dtype

    def __ne__(self, other):
        """Returns `False` if two domains are equivalent. """
        return self.dtype != other.dtype

    def map(self, seq):
        """Recursively apply ``self`` to all elements of ``seq``. """
        result = []

        for elt in seq:
            if isinstance(elt, list):
                result.append(self.map(elt))
            else:
                result.append(self(elt))

        return result

    def get_ring(self):
        """Returns a ring associated with `self`. """
        raise DomainError('there is no ring associated with %s' % self)

    def get_field(self):
        """Returns a field associated with `self`. """
        raise DomainError('there is no field associated with %s' % self)

    def get_exact(self):
        """Returns an exact domain associated with `self`. """
        return self

    def float_domain(self):
        # NOTE(review): `FF` is not imported in this module's visible scope —
        # presumably bound elsewhere or overridden by subclasses; verify
        # before calling.
        return FF

    def complex_domain(self):
        # NOTE(review): `CC` is not imported in this module's visible scope —
        # verify before calling (see float_domain).
        return CC

    def __getitem__(self, gens):
        """The mathematical way to make a polynomial ring. """
        if hasattr(gens, '__iter__'):
            return self.poly_ring(*gens)
        else:
            return self.poly_ring(gens)

    def poly_ring(self, *gens):
        """Returns a polynomial ring, i.e. `K[X]`. """
        from sympy.polys.domains import PolynomialRing
        return PolynomialRing(self, *gens)

    def frac_field(self, *gens):
        """Returns a fraction field, i.e. `K(X)`. """
        from sympy.polys.domains import FractionField
        return FractionField(self, *gens)

    def algebraic_field(self, *extension):
        """Returns an algebraic field, i.e. `K(alpha, ...)`. """
        raise DomainError("can't create algebraic field over %s" % self)

    def inject(self, *gens):
        """Inject generators into this domain. """
        raise NotImplementedError

    def is_zero(self, a):
        """Returns True if `a` is zero. """
        return not a

    def is_one(self, a):
        """Returns True if `a` is one. """
        return a == self.one

    def is_positive(self, a):
        """Returns True if `a` is positive. """
        return a > 0

    def is_negative(self, a):
        """Returns True if `a` is negative. """
        return a < 0

    def is_nonpositive(self, a):
        """Returns True if `a` is non-positive. """
        return a <= 0

    def is_nonnegative(self, a):
        """Returns True if `a` is non-negative. """
        return a >= 0

    # --- Arithmetic wrappers delegating to the element type's operators.

    def abs(self, a):
        """Absolute value of `a`, implies `__abs__`. """
        return abs(a)

    def neg(self, a):
        """Returns `a` negated, implies `__neg__`. """
        return -a

    def pos(self, a):
        """Returns `a` positive, implies `__pos__`. """
        return +a

    def add(self, a, b):
        """Sum of `a` and `b`, implies `__add__`. """
        return a + b

    def sub(self, a, b):
        """Difference of `a` and `b`, implies `__sub__`. """
        return a - b

    def mul(self, a, b):
        """Product of `a` and `b`, implies `__mul__`. """
        return a * b

    def pow(self, a, b):
        """Raise `a` to power `b`, implies `__pow__`. """
        return a ** b

    # --- Division/number-theoretic operations: abstract, domain-specific.

    def exquo(self, a, b):
        """Exact quotient of `a` and `b`, implies something. """
        raise NotImplementedError

    def quo(self, a, b):
        """Quotient of `a` and `b`, implies something. """
        raise NotImplementedError

    def rem(self, a, b):
        """Remainder of `a` and `b`, implies `__mod__`. """
        raise NotImplementedError

    def div(self, a, b):
        """Division of `a` and `b`, implies something. """
        raise NotImplementedError

    def invert(self, a, b):
        """Returns inversion of `a mod b`, implies something. """
        raise NotImplementedError

    def revert(self, a):
        """Returns `a**(-1)` if possible. """
        raise NotImplementedError

    def numer(self, a):
        """Returns numerator of `a`. """
        raise NotImplementedError

    def denom(self, a):
        """Returns denominator of `a`. """
        raise NotImplementedError

    def gcdex(self, a, b):
        """Extended GCD of `a` and `b`. """
        raise NotImplementedError

    def gcd(self, a, b):
        """Returns GCD of `a` and `b`. """
        raise NotImplementedError

    def lcm(self, a, b):
        """Returns LCM of `a` and `b`. """
        raise NotImplementedError

    def log(self, a, b):
        """Returns b-base logarithm of `a`. """
        raise NotImplementedError

    def sqrt(self, a):
        """Returns square root of `a`. """
        raise NotImplementedError

    def evalf(self, a, **args):
        """Returns numerical approximation of `a`. """
        return self.to_sympy(a).evalf(**args)

    def real(self, a):
        # Real part; by default elements are treated as real-valued.
        return a

    def imag(self, a):
        # Imaginary part; zero by default (see `real`).
        return self.zero

    def characteristic(self):
        """Return the characteristic of this domain. """
        raise NotImplementedError('characteristic()')
| |
"""
An H2OConnection represents the latest active handle to a cloud. No more than a single
H2OConnection object will be active at any one time.
"""
from __future__ import print_function
from __future__ import absolute_import
import requests
import math
import tempfile
import os
import re
import sys
import time
import subprocess
import atexit
import warnings
import site
from .display import H2ODisplay
from .h2o_logging import _is_logging, _log_rest
from .two_dim_table import H2OTwoDimTable
from .utils.shared_utils import quote
from six import iteritems, PY3
from string import ascii_lowercase, digits
from random import choice
# Emit every UserWarning each time it is triggered (not only the first time).
warnings.simplefilter('always', UserWarning)
try:
    # Local H2O instances commonly use self-signed certs; silence the noise.
    # `requests.packages.urllib3` may be missing in some requests builds,
    # hence the guarded best-effort below.
    warnings.simplefilter('ignore', requests.packages.urllib3.exceptions.InsecureRequestWarning)
except Exception:
    # Fix: narrowed from a bare `except:` so system-exiting exceptions
    # (KeyboardInterrupt, SystemExit) are no longer swallowed at import time.
    pass

__H2OCONN__ = None                   # the single active connection to H2O cloud
__H2O_REST_API_VERSION__ = 3         # const for the version of the rest api
class H2OConnection(object):
    """
    H2OConnection is a class that represents a connection to the H2O cluster.
    It is specified by an IP address and a port number.

    Objects of type H2OConnection are not instantiated directly!

    This class contains static methods for performing the common REST methods
    GET, POST, and DELETE.
    """

    # Codec settings used when decoding byte output captured from the JVM.
    __ENCODING__ = "utf-8"
    __ENCODING_ERROR__ = "replace"
def __init__(self, ip, port, start_h2o, enable_assertions, license, nthreads, max_mem_size, min_mem_size, ice_root,
strict_version_check, proxy, https, insecure, username, password, max_mem_size_GB, min_mem_size_GB, proxies, size):
"""
Instantiate the package handle to the H2O cluster.
:param ip: An IP address, default is "localhost"
:param port: A port, default is 54321
:param start_h2o: A boolean dictating whether this module should start the H2O jvm. An attempt is made anyways if _connect fails.
:param enable_assertions: If start_h2o, pass `-ea` as a VM option.
:param license: If not None, is a path to a license file.
:param nthreads: Number of threads in the thread pool. This relates very closely to the number of CPUs used.
-1 means use all CPUs on the host. A positive integer specifies the number of CPUs directly. This value is only used when Python starts H2O.
:param max_mem_size: Maximum heap size (jvm option Xmx) in gigabytes.
:param min_mem_size: Minimum heap size (jvm option Xms) in gigabytes.
:param ice_root: A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files.
:param strict_version_check: Setting this to False is unsupported and should only be done when advised by technical support.
:param proxy: A dictionary with keys 'ftp', 'http', 'https' and values that correspond to a proxy path.
:param https: Set this to True to use https instead of http.
:param insecure: Set this to True to disable SSL certificate checking.
:param username: Username to login with.
:param password: Password to login with.
:param max_mem_size_GB: DEPRECATED. Use max_mem_size.
:param min_mem_size_GB: DEPRECATED. Use min_mem_size.
:param proxies: DEPRECATED. Use proxy.
:param size: DEPRECATED.
:return: None
"""
port = as_int(port)
if not (isinstance(port, int) and 0 <= port <= sys.maxsize): raise ValueError("Port out of range, "+port)
if https != insecure: raise ValueError("`https` and `insecure` must both be True to enable HTTPS")
#Deprecated params
if max_mem_size_GB is not None:
warnings.warn("`max_mem_size_GB` is deprecated. Use `max_mem_size` instead.", category=DeprecationWarning)
max_mem_size = max_mem_size_GB
if min_mem_size_GB is not None:
warnings.warn("`min_mem_size_GB` is deprecated. Use `min_mem_size` instead.", category=DeprecationWarning)
min_mem_size = min_mem_size_GB
if proxies is not None:
warnings.warn("`proxies` is deprecated. Use `proxy` instead.", category=DeprecationWarning)
proxy = proxies
if size is not None:
warnings.warn("`size` is deprecated.", category=DeprecationWarning)
global __H2OCONN__
self._cld = None
self._ip = ip
self._port = port
self._proxy = proxy
self._https = https
self._insecure = insecure
self._username = username
self._password = password
self._session_id = None
self._rest_version = __H2O_REST_API_VERSION__
self._child = getattr(__H2OCONN__, "_child") if hasattr(__H2OCONN__, "_child") else None
__H2OCONN__ = self
#Give user warning if proxy environment variable is found. PUBDEV-2504
for name, value in os.environ.items():
if name.lower()[-6:] == '_proxy' and value:
warnings.warn("Proxy environment variable `" + name + "` with value `" + value + "` found. This may interfere with your H2O Connection.")
jarpaths = H2OConnection.jar_paths()
if os.path.exists(jarpaths[0]): jar_path = jarpaths[0]
elif os.path.exists(jarpaths[1]): jar_path = jarpaths[1]
elif os.path.exists(jarpaths[2]): jar_path = jarpaths[2]
elif os.path.exists(jarpaths[3]): jar_path = jarpaths[3]
elif os.path.exists(jarpaths[4]): jar_path = jarpaths[4]
else: jar_path = jarpaths[5]
try:
cld = self._connect()
except:
# try to start local jar or re-raise previous exception
if not start_h2o: raise ValueError("Cannot connect to H2O server. Please check that H2O is running at {}".format(H2OConnection.make_url("")))
print()
print()
print("No instance found at ip and port: " + ip + ":" + str(port) + ". Trying to start local jar...")
print()
print()
path_to_jar = os.path.exists(jar_path)
if path_to_jar:
if not ice_root:
ice_root = tempfile.mkdtemp()
cld = self._start_local_h2o_jar(max_mem_size, min_mem_size, enable_assertions, license, ice_root, jar_path, nthreads)
else:
print("No jar file found. Could not start local instance.")
print("Jar Paths searched: ")
for jp in jarpaths:
print("\t" + jp)
print()
raise
__H2OCONN__._cld = cld
if strict_version_check and os.environ.get('H2O_DISABLE_STRICT_VERSION_CHECK') is None:
ver_h2o = cld['version']
from .__init__ import __version__
ver_pkg = "UNKNOWN" if __version__ == "SUBST_PROJECT_VERSION" else __version__
if ver_h2o != ver_pkg:
try:
branch_name_h2o = cld['branch_name']
except KeyError:
branch_name_h2o = None
else:
branch_name_h2o = cld['branch_name']
try:
build_number_h2o = cld['build_number']
except KeyError:
build_number_h2o = None
else:
build_number_h2o = cld['build_number']
if build_number_h2o is None:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == 'unknown':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Upgrade H2O and h2o-Python to latest stable version - "
"http://h2o-release.s3.amazonaws.com/h2o/latest_stable.html"
"".format(ver_h2o, str(ver_pkg)))
elif build_number_h2o == '99999':
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"This is a developer build, please contact your developer."
"".format(ver_h2o, str(ver_pkg)))
else:
raise EnvironmentError("Version mismatch. H2O is version {0}, but the h2o-python package is version {1}. "
"Install the matching h2o-Python version from - "
"http://h2o-release.s3.amazonaws.com/h2o/{2}/{3}/index.html."
"".format(ver_h2o, str(ver_pkg),branch_name_h2o, build_number_h2o))
self._session_id = H2OConnection.get_json(url_suffix="InitID")["session_key"]
H2OConnection._cluster_info()
    @staticmethod
    def default():
        # Reset the class-level codec settings to their defaults.
        H2OConnection.__ENCODING__ = "utf-8"
        H2OConnection.__ENCODING_ERROR__ = "replace"
@staticmethod
def jar_paths():
sys_prefix1 = sys_prefix2 = sys.prefix
if sys_prefix1.startswith('/Library'): sys_prefix2 = '/System'+sys_prefix1
elif sys_prefix1.startswith('/System'): sys_prefix2 = sys_prefix1.split('/System')[1]
return [os.path.join(sys_prefix1, "h2o_jar", "h2o.jar"),
os.path.join(os.path.sep,"usr","local","h2o_jar","h2o.jar"),
os.path.join(sys_prefix1, "local", "h2o_jar", "h2o.jar"),
os.path.join(site.USER_BASE, "h2o_jar", "h2o.jar"),
os.path.join(sys_prefix2, "h2o_jar", "h2o.jar"),
os.path.join(sys_prefix2, "h2o_jar", "h2o.jar"),
]
@staticmethod
def _cluster_info():
global __H2OCONN__
cld = __H2OCONN__._cld
ncpus = sum([n['num_cpus'] for n in cld['nodes']])
allowed_cpus = sum([n['cpus_allowed'] for n in cld['nodes']])
mfree = sum([n['free_mem'] for n in cld['nodes']])
cluster_health = all([n['healthy'] for n in cld['nodes']])
ip = "127.0.0.1" if __H2OCONN__._ip=="localhost" else __H2OCONN__._ip
cluster_info = [
["H2O cluster uptime: ", get_human_readable_time(cld["cloud_uptime_millis"])],
["H2O cluster version: ", cld["version"]],
["H2O cluster name: ", cld["cloud_name"]],
["H2O cluster total nodes: ", cld["cloud_size"]],
["H2O cluster total free memory: ", get_human_readable_size(mfree)],
["H2O cluster total cores: ", str(ncpus)],
["H2O cluster allowed cores: ", str(allowed_cpus)],
["H2O cluster healthy: ", str(cluster_health)],
["H2O Connection ip: ", ip],
["H2O Connection port: ", __H2OCONN__._port],
["H2O Connection proxy: ", __H2OCONN__._proxy],
["Python Version: ", sys.version.split()[0]],
]
__H2OCONN__._cld = H2OConnection.get_json(url_suffix="Cloud") # update the cached version of cld
H2ODisplay(cluster_info)
    def _connect(self, size=1, max_retries=5, print_dots=False):
        """
        Does not actually "connect", instead simply tests that the cluster can be reached,
        is of a certain size, and is taking basic status commands.

        :param size: The number of H2O instances in the cloud.
        :param max_retries: Give up after this many failed polls.
        :param print_dots: If True, render a dot-per-attempt progress line.
        :return: The JSON response from a "stable" cluster.
        """
        retries = 0
        while True:
            retries += 1
            if print_dots:
                self._print_dots(retries)
            try:
                cld = H2OConnection.get_json(url_suffix="Cloud")
                # NOTE(review): this ValueError is NOT caught below (only
                # EnvironmentError is), so an unhealthy cloud aborts at once.
                if not cld['cloud_healthy']:
                    raise ValueError("Cluster reports unhealthy status", cld)
                if cld['cloud_size'] >= size and cld['consensus']:
                    if print_dots: print(" Connection successful!")
                    return cld
            except EnvironmentError:
                # Cluster unreachable yet — keep polling.
                pass
            # Cloud too small or voting in progress; sleep; try again
            time.sleep(0.1)
            if retries > max_retries:
                raise EnvironmentError("Max retries exceeded. Could not establish link to the H2O cloud @ " + str(self._ip) + ":" + str(self._port))
def _print_dots(self, retries):
sys.stdout.write("\rStarting H2O JVM and connecting: {}".format("." * retries))
sys.stdout.flush()
    def _start_local_h2o_jar(self, mmax, mmin, ea, license, ice, jar_path, nthreads):
        """
        Launch a local H2O JVM from `jar_path` and block until it answers REST calls.

        :param mmax: Maximum heap size in GB (-Xmx); falsy to omit the flag.
        :param mmin: Minimum heap size in GB (-Xms); falsy to omit the flag.
        :param ea: If truthy, pass `-ea` (enable JVM assertions).
        :param license: Optional path to a license file.
        :param ice: H2O ice_root directory (required).
        :param jar_path: Path of the h2o.jar to execute.
        :param nthreads: Passed as -nthreads when > 0.
        :return: Cloud-status JSON once the new instance responds.
        """
        command = H2OConnection._check_java()
        if license:
            if not os.path.exists(license):
                raise ValueError("License file not found (" + license + ")")

        if not ice:
            raise ValueError("`ice_root` must be specified")

        # JVM output is redirected to per-user temp files (paths are printed).
        stdout = open(H2OConnection._tmp_file("stdout"), 'w')
        stderr = open(H2OConnection._tmp_file("stderr"), 'w')

        print("Using ice_root: " + ice)
        print()

        jver = subprocess.check_output([command, "-version"], stderr=subprocess.STDOUT)
        if PY3: jver = str(jver, H2OConnection.__ENCODING__)

        print()
        print("Java Version: " + jver)
        print()

        if "GNU libgcj" in jver:
            raise ValueError("Sorry, GNU Java is not supported for H2O.\n" +
                             "Please download the latest Java SE JDK 7 from the following URL:\n" +
                             "http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html")

        if "Client VM" in jver:
            # 32-bit JVM: warn but continue.
            print("WARNING: ")
            print("You have a 32-bit version of Java. H2O works best with 64-bit Java.")
            print("Please download the latest Java SE JDK 7 from the following URL:")
            print("http://www.oracle.com/technetwork/java/javase/downloads/jdk7-downloads-1880260.html")
            print()

        vm_opts = []
        if mmin: vm_opts += ["-Xms{}g".format(mmin)]
        if mmax: vm_opts += ["-Xmx{}g".format(mmax)]
        if ea: vm_opts += ["-ea"]

        # Cloud name embeds the (sanitized) username plus a random suffix so
        # concurrent local clouds do not merge.
        h2o_opts = ["-verbose:gc",
                    "-XX:+PrintGCDetails",
                    "-XX:+PrintGCTimeStamps",
                    "-jar", jar_path,
                    "-name", "H2O_started_from_python_"
                    + re.sub("[^A-Za-z0-9]", "_",
                             (os.getenv("USERNAME") if sys.platform == "win32" else os.getenv("USER")) or "unknownUser")
                    + "_" + "".join([choice(ascii_lowercase) for _ in range(3)] + [choice(digits) for _ in range(3)]),
                    "-ip", "127.0.0.1",
                    "-port", "54321",
                    "-ice_root", ice,
                    ]

        if nthreads > 0: h2o_opts += ["-nthreads", str(nthreads)]
        if license: h2o_opts += ["-license", license]

        cmd = [command] + vm_opts + h2o_opts

        cwd = os.path.abspath(os.getcwd())
        # New process group (win32) / new session (POSIX) detaches the JVM
        # from this interpreter's signal handling.
        if sys.platform == "win32":
            self._child = subprocess.Popen(args=cmd, stdout=stdout, stderr=stderr, cwd=cwd, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
        else:
            self._child = subprocess.Popen(args=cmd, stdout=stdout, stderr=stderr, cwd=cwd, preexec_fn=os.setsid)
        # Poll up to 30 times with progress dots.
        cld = self._connect(1, 30, True)
        return cld
@staticmethod
def _check_java():
# *WARNING* some over-engineering follows... :{
# is java in PATH?
if H2OConnection._pwhich("java"):
return H2OConnection._pwhich("java")
# check if JAVA_HOME is set (for windoz)
if os.getenv("JAVA_HOME"):
return os.path.join(os.getenv("JAVA_HOME"), "bin", "java.exe")
# check /Program Files/ and /Program Files (x86)/ if os is windoz
if sys.platform == "win32":
program_folder = os.path.join("C:", "{}", "Java")
program_folders = [program_folder.format("Program Files"),
program_folder.format("Program Files (x86)")]
# check both possible program files...
for folder in program_folders:
# hunt down the jdk directory
possible_jdk_dir = [d for d in folder if 'jdk' in d]
# if got a non-empty list of jdk directory candidates
if len(possible_jdk_dir) != 0:
# loop over and check if the java.exe exists
for jdk in possible_jdk_dir:
path = os.path.join(folder, jdk, "bin", "java.exe")
if os.path.exists(path):
return path
# check for JRE and warn
for folder in program_folders:
path = os.path.join(folder, "jre7", "bin", "java.exe")
if os.path.exists(path):
raise ValueError("Found JRE at " + path + "; but H2O requires the JDK to run.")
else:
raise ValueError("Cannot find Java. Please install the latest JDK from\n"
+"http://www.oracle.com/technetwork/java/javase/downloads/index.html" )
@staticmethod
def _pwhich(e):
"""
POSIX style which
"""
ok = os.X_OK
if e:
if os.access(e, ok):
return e
for path in os.getenv('PATH').split(os.pathsep):
full_path = os.path.join(path, e)
if os.access(full_path, ok):
return full_path
return None
@staticmethod
def _tmp_file(type):
usr = re.sub("[^A-Za-z0-9]", "_", (os.getenv("USERNAME") if sys.platform == "win32" else os.getenv("USER")) or "unknownUser")
if type == "stdout":
path = os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.out".format(usr))
print("JVM stdout: " + path)
return path
if type == "stderr":
path = os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.err".format(usr))
print("JVM stderr: " + path)
return path
if type == "pid":
return os.path.join(tempfile.mkdtemp(), "h2o_{}_started_from_python.pid".format(usr))
raise ValueError("Unkown type in H2OConnection._tmp_file call: " + type)
    @staticmethod
    def _shutdown(conn, prompt):
        """
        Shut down the specified instance. All data will be lost.
        This method checks if H2O is running at the specified IP address and port, and if it is, shuts down that H2O
        instance.

        :param conn: An H2OConnection object containing the IP address and port of the server running H2O.
        :param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server.
        :return: None
        """
        global __H2OCONN__
        if conn is None: raise ValueError("There is no H2O instance running.")
        try:
            if not conn.cluster_is_up(conn): raise ValueError("There is no H2O instance running at ip: {0} and port: "
                                                              "{1}".format(conn.ip(), conn.port()))
        except:
            # H2O is already shutdown on the java side
            # NOTE(review): this bare except also catches the ValueError raised
            # just above, deliberately replacing it with the message below.
            ip = conn.ip()
            port = conn.port()
            __H2OCONN__ = None
            raise ValueError("The H2O instance running at {0}:{1} has already been shutdown.".format(ip, port))
        if not isinstance(prompt, bool): raise ValueError("`prompt` must be TRUE or FALSE")
        if prompt:
            question = "Are you sure you want to shutdown the H2O instance running at {0}:{1} (Y/N)? ".format(conn.ip(), conn.port())
            # raw_input keeps Python 2 compatibility.
            response = input(question) if PY3 else raw_input(question)
        else: response = "Y"
        if response == "Y" or response == "y":
            conn.post(url_suffix="Shutdown")
            __H2OCONN__ = None  # so that the "Did you run `h2o.init()`" ValueError is triggered
@staticmethod
def rest_version(): return __H2OCONN__._rest_version
@staticmethod
def session_id(): return __H2OCONN__._session_id
@staticmethod
def port(): return __H2OCONN__._port
@staticmethod
def ip(): return __H2OCONN__._ip
@staticmethod
def https(): return __H2OCONN__._https
@staticmethod
def username(): return __H2OCONN__._username
@staticmethod
def password(): return __H2OCONN__._password
@staticmethod
def insecure(): return __H2OCONN__._insecure
@staticmethod
def current_connection(): return __H2OCONN__
@staticmethod
def check_conn():
if not __H2OCONN__:
raise EnvironmentError("No active connection to an H2O cluster. Try calling `h2o.init()`")
return __H2OCONN__
@staticmethod
def cluster_is_up(conn):
    """
    Determine if an H2O cluster is up or not.

    :param conn: An H2OConnection object containing the IP address and port of the server running H2O.
    :return: TRUE if the cluster is up; FALSE otherwise
    """
    if not isinstance(conn, H2OConnection): raise ValueError("`conn` must be an H2OConnection object")
    # Probe the cluster root URL directly (no REST version prefix).
    rv = conn.current_connection()._attempt_rest(url=("https" if conn.https() else "http") + "://{0}:{1}/".format(conn.ip(), conn.port()), method="GET",
                                                 post_body="", file_upload_info="")
    if rv.status_code == 401: warnings.warn("401 Unauthorized Access. Did you forget to provide a username and password?")
    # A 301 redirect still means the node answered, so it counts as "up".
    return rv.status_code == 200 or rv.status_code == 301
"""
Below is the REST implementation layer:
_attempt_rest -- GET, POST, DELETE
_do_raw_rest
get
post
get_json
post_json
All methods are static and rely on an active __H2OCONN__ object.
"""
@staticmethod
def make_url(url_suffix, _rest_version=None):
    """Build an absolute REST URL for `url_suffix` on the active connection.

    :param url_suffix: endpoint path fragment, e.g. "Shutdown".
    :param _rest_version: optional REST API version override; defaults to
        the connection's negotiated version.
    :return: the full URL as a string.
    """
    if H2OConnection.https():
        scheme = "https"
    else:
        scheme = "http"
    version = _rest_version or H2OConnection.rest_version()
    parts = (scheme, H2OConnection.ip(), H2OConnection.port(), version, url_suffix)
    return "%s://%s:%s/%s/%s" % parts
@staticmethod
def get(url_suffix, **kwargs):
    """Issue a raw GET request against the active connection."""
    conn = __H2OCONN__
    if conn is None:
        raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
    return conn._do_raw_rest(url_suffix, "GET", None, **kwargs)

@staticmethod
def post(url_suffix, file_upload_info=None, **kwargs):
    """Issue a raw POST request (optionally uploading a file)."""
    conn = __H2OCONN__
    if conn is None:
        raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
    return conn._do_raw_rest(url_suffix, "POST", file_upload_info, **kwargs)

@staticmethod
def delete(url_suffix, **kwargs):
    """Issue a raw DELETE request against the active connection."""
    conn = __H2OCONN__
    if conn is None:
        raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
    return conn._do_raw_rest(url_suffix, "DELETE", None, **kwargs)

@staticmethod
def get_json(url_suffix, **kwargs):
    """GET and return decoded JSON with TwoDimTables post-processed."""
    conn = __H2OCONN__
    if conn is None:
        raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
    return conn._rest_json(url_suffix, "GET", None, **kwargs)

@staticmethod
def post_json(url_suffix, file_upload_info=None, **kwargs):
    """POST and return decoded JSON with TwoDimTables post-processed."""
    conn = __H2OCONN__
    if conn is None:
        raise ValueError("No h2o connection. Did you run `h2o.init()` ?")
    return conn._rest_json(url_suffix, "POST", file_upload_info, **kwargs)
def _rest_json(self, url_suffix, method, file_upload_info, **kwargs):
    """Perform a REST call and return its JSON payload, with any embedded
    TwoDimTable structures converted to H2OTwoDimTable objects."""
    response = self._do_raw_rest(url_suffix, method, file_upload_info, **kwargs)
    return self._process_tables(response.json())
# Massage arguments into place, call _attempt_rest
def _do_raw_rest(self, url_suffix, method, file_upload_info, **kwargs):
    """Build the request URL and query string, issue the HTTP request, and
    raise EnvironmentError for any non-OK response.

    :param url_suffix: endpoint fragment appended after the REST version.
    :param method: "GET", "POST" or "DELETE".
    :param file_upload_info: dict describing a file to upload (POST only).
    :param kwargs: query parameters; entries set to None are omitted so
        backend defaults take precedence. `_rest_version` overrides the
        connection's REST version (used for Rapids, /99).
    :return: the raw `requests` response object.
    """
    if not url_suffix:
        raise ValueError("No url suffix supplied.")
    # allow override of REST version, currently used for Rapids which is /99
    if '_rest_version' in kwargs:
        _rest_version = kwargs['_rest_version']
        del kwargs['_rest_version']
    else:
        _rest_version = self._rest_version
    url = H2OConnection.make_url(url_suffix, _rest_version)
    query_string = ""
    for k, v in iteritems(kwargs):
        if v is None: continue  # don't send args set to None so backend defaults take precedence
        if isinstance(v, list):
            # Lists (and nested lists) are serialized as [a,b,...] literals.
            x = '['
            for l in v:
                if isinstance(l, list):
                    x += '['
                    x += ','.join([str(e) if PY3 else str(e).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__) for e in l])
                    x += ']'
                else:
                    x += str(l) if PY3 else str(l).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__)
                x += ','
            x = x[:-1]
            x += ']'
        else:
            x = str(v) if PY3 else str(v).encode(H2OConnection.__ENCODING__, errors=H2OConnection.__ENCODING_ERROR__)
        query_string += k + "=" + quote(x) + "&"
    query_string = query_string[:-1]  # Remove trailing extra &
    post_body = ""
    if not file_upload_info:
        if method == "POST":
            post_body = query_string
        elif query_string != '':
            url = "{}?{}".format(url, query_string)
    else:
        if not method == "POST":
            raise ValueError("Received file upload info and expected method to be POST. Got: " + str(method))
        if query_string != '':
            url = "{}?{}".format(url, query_string)
    if _is_logging():
        _log_rest("------------------------------------------------------------\n")
        _log_rest("\n")
        # BUG FIX: the original format string 'Y-%m-%d %H:%M:%OS3' mixed R's
        # strftime syntax into Python -- '%OS3' is not a valid Python
        # directive and the leading 'Y' was missing its '%'.
        _log_rest("Time: {0}\n".format(time.strftime('%Y-%m-%d %H:%M:%S')))
        _log_rest("\n")
        _log_rest("{0} {1}\n".format(method, url))
        _log_rest("postBody: {0}\n".format(post_body))
    global _rest_ctr
    _rest_ctr = _rest_ctr + 1
    begin_time_seconds = time.time()
    http_result = self._attempt_rest(url, method, post_body, file_upload_info)
    end_time_seconds = time.time()
    elapsed_time_seconds = end_time_seconds - begin_time_seconds
    elapsed_time_millis = elapsed_time_seconds * 1000
    if not http_result.ok:
        # Pull the most specific error text the server offered, if any.
        detailed_error_msgs = []
        try:
            result = http_result.json()
            if 'messages' in result.keys():
                detailed_error_msgs = '\n'.join([m['message'] for m in result['messages'] if m['message_type'] in ['ERRR']])
            elif 'exception_msg' in result.keys():
                detailed_error_msgs = result['exception_msg']
        except ValueError:
            # Response body was not JSON; report the bare status instead.
            pass
        raise EnvironmentError(("h2o-py got an unexpected HTTP status code:\n {} {} (method = {}; url = {}). \n" +
                                "detailed error messages: {}")
                               .format(http_result.status_code, http_result.reason, method, url, detailed_error_msgs))
    if _is_logging():
        _log_rest("\n")
        _log_rest("httpStatusCode: {0}\n".format(http_result.status_code))
        _log_rest("httpStatusMessage: {0}\n".format(http_result.reason))
        _log_rest("millis: {0}\n".format(elapsed_time_millis))
        _log_rest("\n")
        _log_rest("{0}\n".format(http_result.json()))
        _log_rest("\n")
    return http_result
# Low level request call
def _attempt_rest(self, url, method, post_body, file_upload_info):
    """Perform a single HTTP request via `requests`.

    :param url: fully-formed request URL.
    :param method: "GET", "POST" or "DELETE".
    :param post_body: urlencoded body for plain POSTs.
    :param file_upload_info: dict with a "file" key naming a file to upload.
    :return: the `requests` response object.
    :raises EnvironmentError: when the connection itself fails.
    :raises ValueError: for an unsupported HTTP method.
    """
    auth = (self._username, self._password)
    verify = not self._insecure
    headers = {'User-Agent': 'H2O Python client/' + sys.version.replace('\n', '')}
    try:
        if method == "GET":
            return requests.get(url, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
        elif file_upload_info:
            # FIX: open the upload inside a context manager so the handle is
            # closed once the request completes (the original leaked it).
            with open(file_upload_info["file"], "rb") as upload:
                files = {file_upload_info["file"]: upload}
                return requests.post(url, files=files, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
        elif method == "POST":
            headers["Content-Type"] = "application/x-www-form-urlencoded"
            return requests.post(url, data=post_body, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
        elif method == "DELETE":
            return requests.delete(url, headers=headers, proxies=self._proxy, auth=auth, verify=verify)
        else:
            raise ValueError("Unknown HTTP method " + method)
    except requests.ConnectionError as e:
        raise EnvironmentError("h2o-py encountered an unexpected HTTP error:\n {}".format(e))
# TODO:
# @staticmethod
# def _process_matrices(x=None):
# if x:
# if isinstance(x, "dict"):
#
# return x
@staticmethod
def _process_tables(x=None):
    """Recursively walk `x`, replacing any dict whose __meta.schema_type is
    "TwoDimTable" with an H2OTwoDimTable object; everything else is
    returned unchanged (dicts/lists are mutated in place).
    """
    if x:
        if isinstance(x, dict):
            # A dict is a table payload iff its metadata says so.
            has_meta = "__meta" in x
            has_schema_type = has_meta and "schema_type" in x["__meta"]
            have_table = has_schema_type and x["__meta"]["schema_type"] == "TwoDimTable"
            if have_table:
                col_formats = [c["format"] for c in x["columns"]]
                table_header = x["name"]
                table_descr = x["description"]
                col_types = [c["type"] for c in x["columns"]]
                col_headers = [c["name"] for c in x["columns"]]
                row_headers = ["" for i in range(len(col_headers))]
                cell_values = x["data"]
                tbl = H2OTwoDimTable(row_header=row_headers, col_header=col_headers,
                                     col_types=col_types, table_header=table_header,
                                     raw_cell_values=cell_values,
                                     col_formats=col_formats, table_description=table_descr)
                x = tbl
            else:
                # Not a table itself: recurse into each value.
                for k in x:
                    x[k] = H2OConnection._process_tables(x[k])
        if isinstance(x, list):
            for it in range(len(x)):
                x[it] = H2OConnection._process_tables(x[it])
    return x
# Module-level counter of REST calls issued; bumped in _do_raw_rest.
global _rest_ctr
_rest_ctr = 0

@staticmethod
def rest_ctr(): global _rest_ctr; return _rest_ctr
# On exit, close the session to allow H2O to cleanup any temps
def end_session():
    """Close the current H2O session so the server can clean up temporaries.

    Best-effort: failures (no connection, server already gone) are ignored.
    """
    try:
        H2OConnection.delete(url_suffix="InitID")
        # FIX: corrected "Sucessfully" typo in the user-facing message.
        print("Successfully closed the H2O Session.")
    except Exception:
        # Narrowed from a bare except; still deliberately best-effort.
        pass
def get_human_readable_size(num):
    """Format a byte count as a human-readable string, e.g. ``1.5 KB``.

    Uses binary (power-of-two) unit boundaries and rounds to two decimals.
    """
    units = [(0, 'B'), (10, 'KB'), (20, 'MB'), (30, 'GB'), (40, 'TB'), (50, 'PB')]
    idx = 0
    # Climb to the largest unit whose threshold num still reaches.
    while idx + 1 < len(units) and num >= (2 ** units[idx + 1][0]):
        idx += 1
    value = round(float(num) / 2 ** units[idx][0], 2)
    return '%s %s' % (value, units[idx][1])
def get_human_readable_time(epochTimeMillis):
    """Convert a duration in milliseconds into text like ``1 days 2 hours``.

    Zero-valued components are skipped; each emitted component is followed
    by a trailing space.
    """
    days = epochTimeMillis / (24 * 60 * 60 * 1000.0)
    hours = (days - math.floor(days)) * 24
    minutes = (hours - math.floor(hours)) * 60
    seconds = (minutes - math.floor(minutes)) * 60
    millis = (seconds - math.floor(seconds)) * 1000
    labels = ["days", "hours", "minutes", "seconds", "milliseconds"]
    readable = ""
    for label, amount in zip(labels, (days, hours, minutes, seconds, millis)):
        count = int(math.floor(amount))
        if count > 0:
            readable += str(count) + " " + label + " "
    return readable
def is_int(possible_int):
    """Return True if `possible_int` can be converted with int(), else False."""
    try:
        int(possible_int)
        return True
    except (ValueError, TypeError):
        # FIX: also catch TypeError -- int(None) (or any non-numeric,
        # non-string object) raises TypeError, which previously escaped
        # and crashed the caller instead of answering False.
        return False


def as_int(the_int):
    """Convert `the_int` to int, raising ValueError with a clear message
    when the value is not convertible."""
    if not is_int(the_int):
        raise ValueError("Not a valid int value: " + str(the_int))
    return int(the_int)
def _kill_jvm_fork():
    """atexit hook: kill the child H2O JVM if this module forked one."""
    global __H2OCONN__
    if __H2OCONN__ is not None:
        if __H2OCONN__._child:
            __H2OCONN__._child.kill()
            print("Successfully stopped H2O JVM started by the h2o python module.")


# atexit runs callbacks in reverse registration order, so at interpreter
# exit end_session() closes the REST session first, then _kill_jvm_fork()
# stops any locally forked JVM.
atexit.register(_kill_jvm_fork)
atexit.register(end_session)
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import six
from keystone import assignment
from keystone.common import controller
from keystone.common import dependency
from keystone import config
from keystone import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
LOG = log.getLogger(__name__)
CONF = config.CONF
def _trustor_only(context, trust, user_id):
if user_id != trust.get('trustor_user_id'):
raise exception.Forbidden()
def _trustor_trustee_only(trust, user_id):
if (user_id != trust.get('trustee_user_id') and
user_id != trust.get('trustor_user_id')):
raise exception.Forbidden()
def _admin_trustor_only(context, trust, user_id):
if user_id != trust.get('trustor_user_id') and not context['is_admin']:
raise exception.Forbidden()
@dependency.requires('assignment_api', 'identity_api', 'trust_api',
                     'token_api')
class TrustV3(controller.V3Controller):
    """V3 API controller for the OS-TRUST extension.

    Exposes create/get/list/delete operations on trusts plus role
    sub-resources. Access rules: a trust is visible to its trustor and
    trustee; deletion is allowed to the trustor or an admin.
    """

    collection_name = "trusts"
    member_name = "trust"

    @classmethod
    def base_url(cls, context, path=None):
        """Construct a path and pass it to V3Controller.base_url method."""
        # NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that
        # V3Controller.base_url handles setting the self link correctly.
        path = '/OS-TRUST/' + cls.collection_name
        return super(TrustV3, cls).base_url(context, path=path)

    def _get_user_id(self, context):
        # Resolve the calling user from the request token, if one is present.
        if 'token_id' in context:
            token_id = context['token_id']
            token = self.token_api.get_token(token_id)
            user_id = token['user']['id']
            return user_id
        return None

    def get_trust(self, context, trust_id):
        """Return one trust; only its trustor or trustee may view it."""
        user_id = self._get_user_id(context)
        trust = self.trust_api.get_trust(trust_id)
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        _trustor_trustee_only(trust, user_id)
        self._fill_in_roles(context, trust,
                            self.assignment_api.list_roles())
        return TrustV3.wrap_member(context, trust)

    def _fill_in_roles(self, context, trust, all_roles):
        # Normalize expires_at to an ISO8601 string and expand role ids into
        # full role representations, adding the roles_links block.
        if trust.get('expires_at') is not None:
            trust['expires_at'] = (timeutils.isotime
                                   (trust['expires_at'],
                                    subsecond=True))
        if 'roles' not in trust:
            trust['roles'] = []
        trust_full_roles = []
        for trust_role in trust['roles']:
            # Roles may be bare id strings; normalize to {'id': ...} dicts.
            if isinstance(trust_role, six.string_types):
                trust_role = {'id': trust_role}
            matching_roles = [x for x in all_roles
                              if x['id'] == trust_role['id']]
            if matching_roles:
                full_role = assignment.controllers.RoleV3.wrap_member(
                    context, matching_roles[0])['role']
                trust_full_roles.append(full_role)
        trust['roles'] = trust_full_roles
        trust['roles_links'] = {
            'self': (self.base_url(context) + "/%s/roles" % trust['id']),
            'next': None,
            'previous': None}

    def _clean_role_list(self, context, trust, all_roles):
        # Map requested roles (given by id or by name) onto canonical
        # {'id': ...} entries; unknown names raise RoleNotFound.
        trust_roles = []
        all_role_names = dict((r['name'], r) for r in all_roles)
        for role in trust.get('roles', []):
            if 'id' in role:
                trust_roles.append({'id': role['id']})
            elif 'name' in role:
                rolename = role['name']
                if rolename in all_role_names:
                    trust_roles.append({'id':
                                        all_role_names[rolename]['id']})
                else:
                    raise exception.RoleNotFound("role %s is not defined" %
                                                 rolename)
            else:
                raise exception.ValidationError(attribute='id or name',
                                                target='roles')
        return trust_roles

    @controller.protected()
    def create_trust(self, context, trust=None):
        """Create a new trust.

        The user creating the trust must be the trustor.
        """
        # TODO(ayoung): instead of raising ValidationError on the first
        # problem, return a collection of all the problems.
        if not trust:
            raise exception.ValidationError(attribute='trust',
                                            target='request')
        self._require_attribute(trust, 'impersonation')
        self._require_attribute(trust, 'trustee_user_id')
        if trust.get('project_id') and not trust.get('roles'):
            raise exception.Forbidden(
                _('At least one role should be specified.'))
        try:
            user_id = self._get_user_id(context)
            _trustor_only(context, trust, user_id)
            # confirm that the trustee exists
            self.identity_api.get_user(trust['trustee_user_id'])
            all_roles = self.assignment_api.list_roles()
            clean_roles = self._clean_role_list(context, trust, all_roles)
            if trust.get('project_id'):
                user_role = self.assignment_api.get_roles_for_user_and_project(
                    user_id,
                    trust['project_id'])
            else:
                user_role = []
            # The trustor may only delegate roles they actually hold on
            # the target project.
            for trust_role in clean_roles:
                matching_roles = [x for x in user_role
                                  if x == trust_role['id']]
                if not matching_roles:
                    raise exception.RoleNotFound(role_id=trust_role['id'])
            if trust.get('expires_at') is not None:
                # Force a UTC marker before parsing the timestamp.
                if not trust['expires_at'].endswith('Z'):
                    trust['expires_at'] += 'Z'
                try:
                    trust['expires_at'] = (timeutils.parse_isotime
                                           (trust['expires_at']))
                except ValueError:
                    raise exception.ValidationTimeStampError()
            trust_id = uuid.uuid4().hex
            new_trust = self.trust_api.create_trust(trust_id,
                                                    trust,
                                                    clean_roles)
            self._fill_in_roles(context, new_trust, all_roles)
            return TrustV3.wrap_member(context, new_trust)
        except KeyError as e:
            # A missing required key anywhere above surfaces as a 400.
            raise exception.ValidationError(attribute=e.args[0],
                                            target='trust')

    @controller.protected()
    def list_trusts(self, context):
        """List trusts: admins see all; users may filter by their own id."""
        query = context['query_string']
        trusts = []
        if not query:
            self.assert_admin(context)
            trusts += self.trust_api.list_trusts()
        if 'trustor_user_id' in query:
            user_id = query['trustor_user_id']
            calling_user_id = self._get_user_id(context)
            if user_id != calling_user_id:
                raise exception.Forbidden()
            trusts += (self.trust_api.
                       list_trusts_for_trustor(user_id))
        if 'trustee_user_id' in query:
            user_id = query['trustee_user_id']
            calling_user_id = self._get_user_id(context)
            if user_id != calling_user_id:
                raise exception.Forbidden()
            trusts += self.trust_api.list_trusts_for_trustee(user_id)
        for trust in trusts:
            # get_trust returns roles, list_trusts does not
            # It seems in some circumstances, roles does not
            # exist in the query response, so check first
            if 'roles' in trust:
                del trust['roles']
            if trust.get('expires_at') is not None:
                trust['expires_at'] = (timeutils.isotime
                                       (trust['expires_at'],
                                        subsecond=True))
        return TrustV3.wrap_collection(context, trusts)

    @controller.protected()
    def delete_trust(self, context, trust_id):
        """Delete a trust and revoke all tokens issued under it."""
        trust = self.trust_api.get_trust(trust_id)
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        user_id = self._get_user_id(context)
        _admin_trustor_only(context, trust, user_id)
        self.trust_api.delete_trust(trust_id)
        userid = trust['trustor_user_id']
        self.token_api.delete_tokens(userid, trust_id=trust_id)

    @controller.protected()
    def list_roles_for_trust(self, context, trust_id):
        """Return the roles delegated by a trust, with links."""
        trust = self.get_trust(context, trust_id)['trust']
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        user_id = self._get_user_id(context)
        _trustor_trustee_only(trust, user_id)
        return {'roles': trust['roles'],
                'links': trust['roles_links']}

    @controller.protected()
    def check_role_for_trust(self, context, trust_id, role_id):
        """Checks if a role has been assigned to a trust."""
        trust = self.trust_api.get_trust(trust_id)
        if not trust:
            raise exception.TrustNotFound(trust_id=trust_id)
        user_id = self._get_user_id(context)
        _trustor_trustee_only(trust, user_id)
        if not any(role['id'] == role_id for role in trust['roles']):
            raise exception.RoleNotFound(role_id=role_id)

    @controller.protected()
    def get_role_for_trust(self, context, trust_id, role_id):
        """Get a role that has been assigned to a trust."""
        self.check_role_for_trust(context, trust_id, role_id)
        role = self.assignment_api.get_role(role_id)
        return assignment.controllers.RoleV3.wrap_member(context, role)
| |
from os.path import abspath, basename, join, exists
from os.path import dirname
from os.path import relpath
from os import listdir, sep
from re import findall
from io import open
from ..staging import COMMAND_VERSION_FILENAME
from ..action_mapper import FileActionMapper
from ..action_mapper import path_type
from ..action_mapper import MessageAction
from ..util import PathHelper
from ..util import directory_files
from logging import getLogger
log = getLogger(__name__)
def submit_job(client, client_job_description, job_config=None):
    """Stage a job's files via `client` and launch it on the remote
    Pulsar server.

    :param client: Pulsar JobClient used to stage files and launch.
    :param client_job_description: client-side description of the job.
    :param job_config: optional pre-obtained remote job configuration.
    :return: the job id assigned to the job (possibly by the remote server).
    """
    stager = FileStager(client, client_job_description, job_config)
    job_id = stager.job_id
    launch_kwds = dict(
        command_line=stager.get_command_line(),
        dependencies_description=client_job_description.dependencies_description,
        env=client_job_description.env,
    )
    if stager.job_config:
        launch_kwds["job_config"] = stager.job_config
    remote_staging = {}
    setup_actions = stager.transfer_tracker.remote_staging_actions
    if setup_actions:
        remote_staging["setup"] = setup_actions
        # Somehow make the following optional.
        remote_staging["action_mapper"] = stager.action_mapper.to_dict()
        remote_staging["client_outputs"] = client_job_description.client_outputs.to_dict()
    if remote_staging:
        launch_kwds["remote_staging"] = remote_staging
    client.launch(**launch_kwds)
    return job_id
class FileStager(object):
    """
    Objects of the FileStager class interact with an Pulsar client object to
    stage the files required to run jobs on a remote Pulsar server.

    **Parameters**

    client : JobClient
        Pulsar client object.
    client_job_description : client_job_description
        Description of client view of job to stage and execute remotely.
    """

    def __init__(self, client, client_job_description, job_config):
        """
        Capture the job description, set up the remote job (directories and
        job id), upload tool/input/working-directory files, and rewrite
        local paths to remote equivalents when path rewriting is enabled.
        """
        self.client = client
        self.command_line = client_job_description.command_line
        self.config_files = client_job_description.config_files
        self.input_files = client_job_description.input_files
        self.output_files = client_job_description.output_files
        if client_job_description.tool is not None:
            self.tool_id = client_job_description.tool.id
            self.tool_version = client_job_description.tool.version
            self.tool_dir = abspath(client_job_description.tool.tool_dir)
        else:
            # Tool-less job: no tool files will be referenced or staged.
            self.tool_id = None
            self.tool_version = None
            self.tool_dir = None
        self.working_directory = client_job_description.working_directory
        self.version_file = client_job_description.version_file
        self.arbitrary_files = client_job_description.arbitrary_files
        self.rewrite_paths = client_job_description.rewrite_paths

        # Setup job inputs, these will need to be rewritten before
        # shipping off to remote Pulsar server.
        self.job_inputs = JobInputs(self.command_line, self.config_files)

        self.action_mapper = FileActionMapper(client)

        self.__handle_setup(job_config)

        self.transfer_tracker = TransferTracker(client, self.path_helper, self.action_mapper, self.job_inputs, rewrite_paths=self.rewrite_paths)

        self.__initialize_referenced_tool_files()
        if self.rewrite_paths:
            self.__initialize_referenced_arbitrary_files()

        self.__upload_tool_files()
        self.__upload_input_files()
        self.__upload_working_directory_files()
        self.__upload_arbitrary_files()

        if self.rewrite_paths:
            self.__initialize_output_file_renames()
            self.__initialize_task_output_file_renames()
            self.__initialize_config_file_renames()
            self.__initialize_version_file_rename()

        self.__handle_rewrites()

        self.__upload_rewritten_config_files()

    def __handle_setup(self, job_config):
        # Ask the remote server to set the job up unless a config was given.
        if not job_config:
            job_config = self.client.setup(self.tool_id, self.tool_version)
        self.new_working_directory = job_config['working_directory']
        self.new_outputs_directory = job_config['outputs_directory']
        # Default configs_directory to match remote working_directory to mimic
        # behavior of older Pulsar servers.
        self.new_configs_directory = job_config.get('configs_directory', self.new_working_directory)
        self.remote_separator = self.__parse_remote_separator(job_config)
        self.path_helper = PathHelper(self.remote_separator)
        # If remote Pulsar server assigned job id, use that otherwise
        # just use local job_id assigned.
        galaxy_job_id = self.client.job_id
        self.job_id = job_config.get('job_id', galaxy_job_id)
        if self.job_id != galaxy_job_id:
            # Remote Pulsar server assigned an id different than the
            # Galaxy job id, update client to reflect this.
            self.client.job_id = self.job_id
        self.job_config = job_config

    def __parse_remote_separator(self, job_config):
        separator = job_config.get("system_properties", {}).get("separator", None)
        if not separator:  # Legacy Pulsar
            separator = job_config["path_separator"]  # Poorly named
        return separator

    def __initialize_referenced_tool_files(self):
        # Tool files mentioned on the command line/configs must be staged.
        self.referenced_tool_files = self.job_inputs.find_referenced_subfiles(self.tool_dir)

    def __initialize_referenced_arbitrary_files(self):
        # Collect unstructured paths matched by mapper patterns; the first
        # mapper to match a path wins.
        referenced_arbitrary_path_mappers = dict()
        for mapper in self.action_mapper.unstructured_mappers():
            mapper_pattern = mapper.to_pattern()
            # TODO: Make more sophisticated, allow parent directories,
            # grabbing sibbling files based on patterns, etc...
            paths = self.job_inputs.find_pattern_references(mapper_pattern)
            for path in paths:
                if path not in referenced_arbitrary_path_mappers:
                    referenced_arbitrary_path_mappers[path] = mapper
        for path, mapper in referenced_arbitrary_path_mappers.items():
            action = self.action_mapper.action(path, path_type.UNSTRUCTURED, mapper)
            unstructured_map = action.unstructured_map(self.path_helper)
            self.arbitrary_files.update(unstructured_map)

    def __upload_tool_files(self):
        for referenced_tool_file in self.referenced_tool_files:
            self.transfer_tracker.handle_transfer(referenced_tool_file, path_type.TOOL)

    def __upload_arbitrary_files(self):
        for path, name in self.arbitrary_files.items():
            self.transfer_tracker.handle_transfer(path, path_type.UNSTRUCTURED, name=name)

    def __upload_input_files(self):
        for input_file in self.input_files:
            self.__upload_input_file(input_file)
            self.__upload_input_extra_files(input_file)

    def __upload_input_file(self, input_file):
        if self.__stage_input(input_file):
            if exists(input_file):
                self.transfer_tracker.handle_transfer(input_file, path_type.INPUT)
            else:
                # Tolerate missing/empty datasets - just log and skip.
                message = "Pulsar: __upload_input_file called on empty or missing dataset." + \
                          " So such file: [%s]" % input_file
                log.debug(message)

    def __upload_input_extra_files(self, input_file):
        # Convention: dataset_N.dat may have a sibling dataset_N_files dir
        # of extra files that must be staged alongside it.
        files_path = "%s_files" % input_file[0:-len(".dat")]
        if exists(files_path) and self.__stage_input(files_path):
            for extra_file_name in directory_files(files_path):
                extra_file_path = join(files_path, extra_file_name)
                remote_name = self.path_helper.remote_name(relpath(extra_file_path, dirname(files_path)))
                self.transfer_tracker.handle_transfer(extra_file_path, path_type.INPUT, name=remote_name)

    def __upload_working_directory_files(self):
        # Task manager stages files into working directory, these need to be
        # uploaded if present.
        working_directory_files = self.__working_directory_files()
        for working_directory_file in working_directory_files:
            path = join(self.working_directory, working_directory_file)
            self.transfer_tracker.handle_transfer(path, path_type.WORKDIR)

    def __working_directory_files(self):
        if self.working_directory and exists(self.working_directory):
            return listdir(self.working_directory)
        else:
            return []

    def __initialize_version_file_rename(self):
        version_file = self.version_file
        if version_file:
            remote_path = self.path_helper.remote_join(self.new_outputs_directory, COMMAND_VERSION_FILENAME)
            self.transfer_tracker.register_rewrite(version_file, remote_path, path_type.OUTPUT)

    def __initialize_output_file_renames(self):
        for output_file in self.output_files:
            remote_path = self.path_helper.remote_join(self.new_outputs_directory, basename(output_file))
            self.transfer_tracker.register_rewrite(output_file, remote_path, path_type.OUTPUT)

    def __initialize_task_output_file_renames(self):
        # Outputs may also appear in the working directory under the same
        # basename; rewrite those references to the remote working dir.
        for output_file in self.output_files:
            name = basename(output_file)
            task_file = join(self.working_directory, name)
            remote_path = self.path_helper.remote_join(self.new_working_directory, name)
            self.transfer_tracker.register_rewrite(task_file, remote_path, path_type.OUTPUT_WORKDIR)

    def __initialize_config_file_renames(self):
        for config_file in self.config_files:
            remote_path = self.path_helper.remote_join(self.new_configs_directory, basename(config_file))
            self.transfer_tracker.register_rewrite(config_file, remote_path, path_type.CONFIG)

    def __handle_rewrites(self):
        """
        For each file that has been transferred and renamed, updated
        command_line and configfiles to reflect that rewrite.
        """
        self.transfer_tracker.rewrite_input_paths()

    def __upload_rewritten_config_files(self):
        for config_file, new_config_contents in self.job_inputs.config_files.items():
            self.transfer_tracker.handle_transfer(config_file, type=path_type.CONFIG, contents=new_config_contents)

    def get_command_line(self):
        """
        Returns the rewritten version of the command line to execute suitable
        for remote host.
        """
        return self.job_inputs.command_line

    def __stage_input(self, file_path):
        # If we have disabled path rewriting, just assume everything needs to be transferred,
        # else check to ensure the file is referenced before transferring it.
        return (not self.rewrite_paths) or self.job_inputs.path_referenced(file_path)
class JobInputs(object):
    """
    Abstractions over dynamic inputs created for a given job (namely the command to
    execute and created configfiles).

    **Parameters**

    command_line : str
        Local command to execute for this job. (To be rewritten.)
    config_files : str
        Config files created for this job. (To be rewritten.)

    >>> import tempfile
    >>> tf = tempfile.NamedTemporaryFile()
    >>> def setup_inputs(tf):
    ...     open(tf.name, "w").write(u"world /path/to/input the rest")
    ...     inputs = JobInputs(u"hello /path/to/input", [tf.name])
    ...     return inputs
    >>> inputs = setup_inputs(tf)
    >>> inputs.rewrite_paths(u"/path/to/input", u'C:\\input')
    >>> inputs.command_line == u'hello C:\\\\input'
    True
    >>> inputs.config_files[tf.name] == u'world C:\\\\input the rest'
    True
    >>> tf.close()
    """

    def __init__(self, command_line, config_files):
        self.command_line = command_line
        # Load every config file's contents into memory up front.
        self.config_files = {}
        for path in (config_files or []):
            self.config_files[path] = _read(path)

    def find_pattern_references(self, pattern):
        """Return the distinct regex matches of `pattern` across all inputs."""
        matches = set()
        for text in self.__items():
            matches.update(findall(pattern, text))
        return list(matches)

    def find_referenced_subfiles(self, directory):
        """
        Return list of files below specified `directory` in job inputs. Could
        use more sophisticated logic (match quotes to handle spaces, handle
        subdirectories, etc...).

        **Parameters**

        directory : str
            Full path to directory to search.
        """
        if directory is None:
            return []
        return self.find_pattern_references(r"(%s%s\S+)" % (directory, sep))

    def path_referenced(self, path):
        """True if `path` occurs (as a regex) in the command line or configs."""
        target = r"%s" % path
        return any(findall(target, text) for text in self.__items())

    def rewrite_paths(self, local_path, remote_path):
        """
        Rewrite references to `local_path` with `remote_path` in job inputs.
        """
        self.__rewrite_command_line(local_path, remote_path)
        self.__rewrite_config_files(local_path, remote_path)

    def __rewrite_command_line(self, local_path, remote_path):
        self.command_line = self.command_line.replace(local_path, remote_path)

    def __rewrite_config_files(self, local_path, remote_path):
        for name in self.config_files:
            self.config_files[name] = self.config_files[name].replace(local_path, remote_path)

    def __items(self):
        # Every piece of rewritable text: command line plus config contents.
        return [self.command_line] + list(self.config_files.values())
class TransferTracker(object):
    """Tracks which job files have been transferred (or scheduled for remote
    staging) and which local paths must be rewritten to remote ones.
    """

    def __init__(self, client, path_helper, action_mapper, job_inputs, rewrite_paths):
        self.client = client
        self.path_helper = path_helper
        self.action_mapper = action_mapper
        self.job_inputs = job_inputs
        self.rewrite_paths = rewrite_paths
        # local path -> remote path replacements to apply to job inputs.
        self.file_renames = {}
        # staging actions the remote server must perform during setup.
        self.remote_staging_actions = []

    def handle_transfer(self, path, type, name=None, contents=None):
        """Stage one file according to its mapped action and record the
        resulting local->remote path rewrite when needed.
        """
        action = self.__action_for_transfer(path, type, contents)
        if action.staging_needed:
            local_action = action.staging_action_local
            if local_action:
                # Client pushes the file now; remote path comes back in the
                # put_file response.
                response = self.client.put_file(path, type, name=name, contents=contents, action_type=action.action_type)

                def get_path():
                    return response['path']
            else:
                # The remote side will stage this file into the job
                # directory during setup.
                job_directory = self.client.job_directory
                assert job_directory, "job directory required for action %s" % action
                if not name:
                    name = basename(path)
                self.__add_remote_staging_input(action, name, type)

                def get_path():
                    return job_directory.calculate_path(name, type)
            register = self.rewrite_paths or type == 'tool'  # Even if inputs not rewritten, tool must be.
            if register:
                self.register_rewrite(path, get_path(), type, force=True)
        elif self.rewrite_paths:
            path_rewrite = action.path_rewrite(self.path_helper)
            if path_rewrite:
                self.register_rewrite(path, path_rewrite, type, force=True)
        # else: # No action for this file

    def __add_remote_staging_input(self, action, name, type):
        input_dict = dict(
            name=name,
            type=type,
            action=action.to_dict(),
        )
        self.remote_staging_actions.append(input_dict)

    def __action_for_transfer(self, path, type, contents):
        if contents:
            # If contents loaded in memory, no need to write out file and copy,
            # just transfer.
            action = MessageAction(contents=contents, client=self.client)
        else:
            if not exists(path):
                message = "handle_tranfer called on non-existent file - [%s]" % path
                log.warn(message)
                raise Exception(message)
            action = self.__action(path, type)
        return action

    def register_rewrite(self, local_path, remote_path, type, force=False):
        # Record the rename; `force` registers it even when the mapped
        # action would not otherwise require staging.
        action = self.__action(local_path, type)
        if action.staging_needed or force:
            self.file_renames[local_path] = remote_path

    def rewrite_input_paths(self):
        """
        For each file that has been transferred and renamed, updated
        command_line and configfiles to reflect that rewrite.
        """
        for local_path, remote_path in self.file_renames.items():
            self.job_inputs.rewrite_paths(local_path, remote_path)

    def __action(self, path, type):
        return self.action_mapper.action(path, type)
def _read(path):
"""
Utility method to quickly read small files (config files and tool
wrappers) into memory as bytes.
"""
input = open(path, "r", encoding="utf-8")
try:
return input.read()
finally:
input.close()
__all__ = ['submit_job']
| |
import inspect
import textwrap
from unittest import TestCase, main
from lark import Lark
from lark.indenter import PythonIndenter
from lark.exceptions import UnexpectedCharacters, UnexpectedToken, ParseError
valid_DEC_NUMBER = [
"0",
"000",
"0_0_0",
"4_2",
"1_0000_0000",
"123456789012345678901234567890",
]
valid_HEX_NUMBER = [
"0x_f",
"0xffff_ffff",
"0xffffffffffffffff",
"0Xffffffffffffffff",
]
valid_OCT_NUMBER = [
"0o5_7_7",
"0o_5",
"0o77777777777777777",
"0O77777777777777777",
]
valid_BIN_NUMBER = [
"0b1001_0100",
"0b_0",
"0b100000000000000000000000000000000000000000000000000000000000000000000",
"0B111111111111111111111111111111111111111111111111111111111111111111111",
]
valid_FLOAT_NUMBER = [
"1_00_00.5",
"1_00_00.5e5",
"1_00_00e5_1",
"1e1_0",
".1_4",
".1_4e1",
"1_2.5",
"3.14",
"314.",
"0.314",
"000.314",
".314",
"3e14",
"3E14",
"3e-14",
"3e+14",
"3.e14",
".3e14",
"3.1e4",
]
valid_IMAG_NUMBER = [
"0j",
"123456789012345678901234567890j",
"1_00_00j",
"1_00_00.5j",
"1_00_00e5_1j",
".1_4j",
"3_3j",
".5_6j",
"3.14j",
"314.j",
"0.314j",
"000.314j",
".314j",
"3e14j",
"3E14j",
"3e-14j",
"3e+14j",
"3.e14j",
".3e14j",
"3.1e4j",
]
valid_number = (valid_DEC_NUMBER + valid_HEX_NUMBER + valid_OCT_NUMBER +
valid_BIN_NUMBER + valid_FLOAT_NUMBER + valid_IMAG_NUMBER)
invalid_number = [
"0_",
"42_",
"1.4j_",
"0x_",
"0b1_",
"0xf_",
"0o5_",
"1_Else",
"0_b0",
"0_xf",
"0_o5",
"0_7",
"09_99",
"4_______2",
"0.1__4",
"0.1__4j",
"0b1001__0100",
"0xffff__ffff",
"0x___",
"0o5__77",
"1e1__0",
"1e1__0j",
"1_.4",
"1_.4j",
"1._4",
"1._4j",
"._5",
"._5j",
"1.0e+_1",
"1.0e+_1j",
"1.4_j",
"1.4e5_j",
"1_e1",
"1.4_e1",
"1.4_e1j",
"1e_1",
"1.4e_1",
"1.4e_1j",
"1+1.5_j_",
"1+1.5_j",
"_0",
"_42",
"_1.4j",
"_0x",
"_0b1",
"_0xf",
"_0o5",
"_1_Else",
"_0_b0",
"_0_xf",
"_0_o5",
"_0_7",
"_09_99",
"_4_______2",
"_0.1__4",
"_0.1__4j",
"_0b1001__0100",
"_0xffff__ffff",
"_0x__",
"_0o5__77",
"_1e1__0",
"_1e1__0j",
"_1_.4",
"_1_.4j",
"_1._4",
"_1._4j",
"_._5",
"_._5j",
"_1.0e+_1",
"_1.0e+_1j",
"_1.4_j",
"_1.4e5_j",
"_1_e1",
"_1.4_e1",
"_1.4_e1j",
"_1e_1",
"_1.4e_1",
"_1.4e_1j",
"_1+1.5_j",
"_1+1.5_j",
]
valid_match_statements = [
# constant and capture patterns
textwrap.dedent("""
match greeting:
case "":
print("Hello!")
case name:
print(f"Hi {name}!")
"""),
# pattern unions
textwrap.dedent("""
match something:
case 0 | 1 | 2:
print("Small number")
case [] | [_]:
print("A short sequence")
case str() | bytes():
print("Something string-like")
case _:
print("Something else")
"""),
# guards
textwrap.dedent("""
match val:
case [x, y] if x > 0 and y > 0:
return f"A pair of {x} and {y}"
case [x, *other]:
return f"A sequence starting with {x}"
case int():
return f"Some integer"
"""),
# "as" patterns
textwrap.dedent("""
match command.split():
case ["go", ("north" | "south" | "east" | "west") as direction]:
current_room = current_room.neighbor(direction)
""")
]
invalid_match_statements = [
# no cases
textwrap.dedent("""
match val:
pass
"""),
# cases not indented relative to match
textwrap.dedent("""
match val:
case x:
pass
""")
]
class TestPythonParser(TestCase):
    """Tests for the Python grammar bundled with lark (grammars/python.lark)."""

    @classmethod
    def setUpClass(cls):
        # Build one LALR parser shared by every test. The PythonIndenter
        # postlexer supplies the INDENT/DEDENT tokens the grammar needs.
        # Two start symbols: "number" for terminal-level tests and
        # "file_input" for whole-file tests.
        cls.python_parser = Lark.open_from_package(
            "lark", "python.lark", ("grammars",), parser='lalr',
            postlex=PythonIndenter(), start=["number", "file_input"])

    def _test_parsed_is_this_terminal(self, text, terminal, start):
        # Assert `text` parses (from `start`) to a single token whose type
        # is `terminal` and whose value is the unmodified input.
        tree = self.python_parser.parse(text, start=start)
        self.assertEqual(len(tree.children), 1)
        token = tree.children[0]
        self.assertEqual(token.type, terminal)
        self.assertEqual(token.value, text)

    def _test_parsed_is_file_containing_only_this_statement(self, text, statement):
        # Assert `text` parses as a file holding exactly one top-level
        # statement whose rule name is `statement`. (In lark, Tree.data is
        # a Token of type "RULE".)
        tree = self.python_parser.parse(text, start="file_input")
        self.assertEqual(len(tree.children), 1)
        statement_token = tree.children[0].data
        self.assertEqual(statement_token.type, "RULE")
        self.assertEqual(statement_token.value, statement)

    def test_DEC_NUMBER(self):
        for case in valid_DEC_NUMBER:
            self._test_parsed_is_this_terminal(case, "DEC_NUMBER", "number")

    def test_HEX_NUMBER(self):
        for case in valid_HEX_NUMBER:
            self._test_parsed_is_this_terminal(case, "HEX_NUMBER", "number")

    def test_OCT_NUMBER(self):
        for case in valid_OCT_NUMBER:
            self._test_parsed_is_this_terminal(case, "OCT_NUMBER", "number")

    def test_BIN_NUMBER(self):
        for case in valid_BIN_NUMBER:
            self._test_parsed_is_this_terminal(case, "BIN_NUMBER", "number")

    def test_FLOAT_NUMBER(self):
        for case in valid_FLOAT_NUMBER:
            self._test_parsed_is_this_terminal(case, "FLOAT_NUMBER", "number")

    def test_IMAG_NUMBER(self):
        for case in valid_IMAG_NUMBER:
            self._test_parsed_is_this_terminal(case, "IMAG_NUMBER", "number")

    def test_valid_number(self):
        # XXX: all valid test cases should run with the above tests for numbers
        for case in valid_number:
            self.python_parser.parse(case, start="number")  # no error

    def test_invalid_number(self):
        for case in invalid_number:
            with self.assertRaises((UnexpectedCharacters, UnexpectedToken)):
                self.python_parser.parse(case, start="number")

    def test_valid_match_statement(self):
        for case in valid_match_statements:
            self._test_parsed_is_file_containing_only_this_statement(case, "match_stmt")

    def test_invalid_match_statement(self):
        for case in invalid_match_statements:
            with self.assertRaises(ParseError):
                self.python_parser.parse(case, start="file_input")

    def test_assign_to_variable_named_match(self):
        # "match" is a soft keyword: it must still work as a plain name.
        text = textwrap.dedent("""
            match = re.match(pattern, string)
            """)
        self._test_parsed_is_file_containing_only_this_statement(text, "assign_stmt")

    def test_assign_expr_with_variable_named_match(self):
        # Soft keyword "match" used as the walrus target inside an if.
        text = textwrap.dedent("""
            if match := re.match(pattern, string):
                do_thing(match)
            """)
        self._test_parsed_is_file_containing_only_this_statement(text, "if_stmt")
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    main()
| |
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
import signal
import sys
from inspect import getdoc
from operator import attrgetter
from docker.errors import APIError
from requests.exceptions import ReadTimeout
from .. import __version__
from .. import legacy
from ..config import parse_environment
from ..const import DEFAULT_TIMEOUT
from ..const import HTTP_TIMEOUT
from ..const import IS_WINDOWS_PLATFORM
from ..progress_stream import StreamOutputError
from ..project import ConfigurationError
from ..project import NoSuchService
from ..service import BuildError
from ..service import ConvergenceStrategy
from ..service import NeedsBuildError
from .command import Command
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import Formatter
from .log_printer import LogPrinter
from .utils import get_version_info
from .utils import yesno
# dockerpty provides interactive TTY support and does not work on Windows,
# so only import it on other platforms (interactive mode is rejected there).
if not IS_WINDOWS_PLATFORM:
    import dockerpty

log = logging.getLogger(__name__)

# Module-level handler so perform_command() can adjust its level/formatter
# after --verbose has been parsed.
console_handler = logging.StreamHandler(sys.stderr)

# User-facing deprecation warning text (runtime string; emitted via log.warn).
INSECURE_SSL_WARNING = """
Warning: --allow-insecure-ssl is deprecated and has no effect.
It will be removed in a future version of Compose.
"""
def main():
    """CLI entry point: configure logging, dispatch the requested command,
    and translate every known failure into an error message plus exit
    status 1."""
    setup_logging()
    try:
        command = TopLevelCommand()
        command.sys_dispatch()
    except KeyboardInterrupt:
        log.error("\nAborting.")
        sys.exit(1)
    except (UserError, NoSuchService, ConfigurationError, legacy.LegacyError) as e:
        log.error(e.msg)
        sys.exit(1)
    except NoSuchCommand as e:
        log.error("No such command: %s", e.command)
        log.error("")
        # Show the "commands:" section of the parent command's usage text.
        log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
        sys.exit(1)
    except APIError as e:
        log.error(e.explanation)
        sys.exit(1)
    except BuildError as e:
        log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
        sys.exit(1)
    except StreamOutputError as e:
        log.error(e)
        sys.exit(1)
    except NeedsBuildError as e:
        log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
        sys.exit(1)
    except ReadTimeout as e:
        log.error(
            "An HTTP request took too long to complete. Retry with --verbose to obtain debug information.\n"
            "If you encounter this issue regularly because of slow network conditions, consider setting "
            "COMPOSE_HTTP_TIMEOUT to a higher value (current value: %s)." % HTTP_TIMEOUT
        )
        # Fix: exit non-zero like every other error path above, so callers
        # and scripts do not observe a success status after a timeout.
        sys.exit(1)
def setup_logging():
    """Route all log records to the shared stderr handler at DEBUG level."""
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(console_handler)
    # Keep the requests library's records from reaching the root logger.
    logging.getLogger("requests").propagate = False
# stolen from docopt master
def parse_doc_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
# NOTE: the class and method docstrings below double as docopt definitions.
# docopt parses them at runtime, so their text is part of the CLI contract --
# do not edit them casually.
class TopLevelCommand(Command):
    """Define and run multi-container applications with Docker.

    Usage:
      docker-compose [-f=<arg>...] [options] [COMMAND] [ARGS...]
      docker-compose -h|--help

    Options:
      -f, --file FILE           Specify an alternate compose file (default: docker-compose.yml)
      -p, --project-name NAME   Specify an alternate project name (default: directory name)
      --verbose                 Show more output
      -v, --version             Print version and exit

    Commands:
      build              Build or rebuild services
      help               Get help on a command
      kill               Kill containers
      logs               View output from containers
      pause              Pause services
      port               Print the public port for a port binding
      ps                 List containers
      pull               Pulls service images
      restart            Restart services
      rm                 Remove stopped containers
      run                Run a one-off command
      scale              Set number of containers for a service
      start              Start services
      stop               Stop services
      unpause            Unpause services
      up                 Create and start containers
      migrate-to-labels  Recreate containers to add labels
      version            Show the Docker-Compose version information
    """
def docopt_options(self):
    # Extend the base options with the string docopt prints for --version.
    options = super(TopLevelCommand, self).docopt_options()
    options['version'] = get_version_info('compose')
    return options
def perform_command(self, options, *args, **kwargs):
    """Tune console logging for --verbose, then dispatch via the base class."""
    if options.get('--verbose'):
        formatter = logging.Formatter('%(name)s.%(funcName)s: %(message)s')
        level = logging.DEBUG
    else:
        formatter = logging.Formatter()
        level = logging.INFO
    console_handler.setFormatter(formatter)
    console_handler.setLevel(level)
    return super(TopLevelCommand, self).perform_command(options, *args, **kwargs)
def build(self, project, options):
    """
    Build or rebuild services.

    Services are built once and then tagged as `project_service`,
    e.g. `composetest_db`. If you change a service's `Dockerfile` or the
    contents of its build directory, you can run `docker-compose build` to rebuild it.

    Usage: build [options] [SERVICE...]

    Options:
        --no-cache  Do not use cache when building the image.
        --pull      Always attempt to pull a newer version of the image.
        -v VERSION  Put a version, latest by default
    """
    # Normalize docopt flag values to plain booleans before passing on.
    no_cache = bool(options.get('--no-cache', False))
    pull = bool(options.get('--pull', False))
    version = options.get('-v', None)
    project.build(service_names=options['SERVICE'], no_cache=no_cache, pull=pull, version=version)
def help(self, project, options):
    """
    Get help on a command.

    Usage: help COMMAND
    """
    # SystemExit with a string prints the handler's docstring (its docopt
    # usage text) to stderr and exits.
    handler = self.get_handler(options['COMMAND'])
    raise SystemExit(getdoc(handler))
def kill(self, project, options):
    """
    Force stop service containers.

    Usage: kill [options] [SERVICE...]

    Options:
        -s SIGNAL         SIGNAL to send to the container.
                          Default signal is SIGKILL.
    """
    # NOTE: this local name shadows the stdlib `signal` module inside
    # this method; kept as-is since the module is not used here.
    signal = options.get('-s', 'SIGKILL')
    project.kill(service_names=options['SERVICE'], signal=signal)
def logs(self, project, options):
    """
    View output from containers.

    Usage: logs [options] [SERVICE...]

    Options:
        --no-color  Produce monochrome output.
    """
    # stopped=True so output of exited containers is included as well.
    containers = project.containers(service_names=options['SERVICE'], stopped=True)
    monochrome = options['--no-color']
    print("Attaching to", list_containers(containers))
    # Blocks until the streams are exhausted or the user interrupts.
    LogPrinter(containers, monochrome=monochrome).run()
def pause(self, project, options):
    """
    Pause services.

    Usage: pause [SERVICE...]
    """
    project.pause(service_names=options['SERVICE'])
def port(self, project, options):
    """
    Print the public port for a port binding.

    Usage: port [options] SERVICE PRIVATE_PORT

    Options:
        --protocol=proto  tcp or udp [default: tcp]
        --index=index     index of the container if there are multiple
                          instances of a service [default: 1]
    """
    # docopt guarantees a default of "1", so int() is safe here.
    index = int(options.get('--index'))
    service = project.get_service(options['SERVICE'])
    try:
        container = service.get_container(number=index)
    except ValueError as e:
        # No container with that index -> surface as a user-facing error.
        raise UserError(str(e))
    # Prints an empty string when the private port is not bound.
    print(container.get_local_port(
        options['PRIVATE_PORT'],
        protocol=options.get('--protocol') or 'tcp') or '')
def ps(self, project, options):
    """
    List containers.

    Usage: ps [options] [SERVICE...]

    Options:
        -q    Only display IDs
    """
    # Include stopped and one-off containers, ordered by container name.
    containers = sorted(
        project.containers(service_names=options['SERVICE'], stopped=True) +
        project.containers(service_names=options['SERVICE'], one_off=True),
        key=attrgetter('name'))

    if options['-q']:
        for container in containers:
            print(container.id)
        return

    headers = ['Name', 'Image', 'Command', 'State', 'Ports']
    rows = []
    for container in containers:
        command = container.human_readable_command
        # Truncate long commands so the table stays readable.
        if len(command) > 30:
            command = '%s ...' % command[:26]
        rows.append([
            container.name,
            container.image_name,
            command,
            container.human_readable_state,
            container.human_readable_ports,
        ])
    print(Formatter().table(headers, rows))
def pull(self, project, options):
    """
    Pulls images for services.

    Usage: pull [options] [SERVICE...]

    Options:
        --ignore-pull-failures  Pull what it can and ignores images with pull failures.
        --allow-insecure-ssl    Deprecated - no effect.
    """
    # Flag kept only for backwards compatibility: warn and ignore it.
    if options['--allow-insecure-ssl']:
        log.warn(INSECURE_SSL_WARNING)
    project.pull(
        service_names=options['SERVICE'],
        ignore_pull_failures=options.get('--ignore-pull-failures')
    )
def rm(self, project, options):
    """
    Remove stopped service containers.

    Usage: rm [options] [SERVICE...]

    Options:
        -f, --force   Don't ask to confirm removal
        -v            Remove volumes associated with containers
    """
    candidates = [
        c for c in project.containers(service_names=options['SERVICE'], stopped=True)
        if not c.is_running
    ]
    if not candidates:
        print("No stopped containers")
        return
    print("Going to remove", list_containers(candidates))
    # --force skips the prompt; otherwise ask (default answer is "no").
    confirmed = options.get('--force') or yesno("Are you sure? [yN] ", default=False)
    if confirmed:
        project.remove_stopped(
            service_names=options['SERVICE'],
            v=options.get('-v', False)
        )
def run(self, project, options):
    """
    Run a one-off command on a service.

    For example:

        $ docker-compose run web python manage.py shell

    By default, linked services will be started, unless they are already
    running. If you do not want to start linked services, use
    `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.

    Usage: run [options] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]

    Options:
        --allow-insecure-ssl  Deprecated - no effect.
        -d                    Detached mode: Run container in the background, print
                              new container name.
        --name NAME           Assign a name to the container
        --entrypoint CMD      Override the entrypoint of the image.
        -e KEY=VAL            Set an environment variable (can be used multiple times)
        -u, --user=""         Run as specified username or uid
        --no-deps             Don't start linked services.
        --rm                  Remove container after run. Ignored in detached mode.
        -p, --publish=[]      Publish a container's port(s) to the host
        --service-ports       Run command with the service's ports enabled and mapped
                              to the host.
        -T                    Disable pseudo-tty allocation. By default `docker-compose run`
                              allocates a TTY.
    """
    service = project.get_service(options['SERVICE'])
    detach = options['-d']

    # Attached mode relies on dockerpty, which is unavailable on Windows.
    if IS_WINDOWS_PLATFORM and not detach:
        raise UserError(
            "Interactive mode is not yet supported on Windows.\n"
            "Please pass the -d flag when using `docker-compose run`."
        )

    if options['--allow-insecure-ssl']:
        log.warn(INSECURE_SSL_WARNING)

    # Bring up linked services first, never recreating existing containers.
    if not options['--no-deps']:
        deps = service.get_linked_service_names()
        if len(deps) > 0:
            project.up(
                service_names=deps,
                start_deps=True,
                strategy=ConvergenceStrategy.never,
            )

    # Allocate a TTY unless detached, explicitly disabled with -T, or
    # stdin is not a terminal.
    tty = True
    if detach or options['-T'] or not sys.stdin.isatty():
        tty = False

    if options['COMMAND']:
        command = [options['COMMAND']] + options['ARGS']
    else:
        command = service.options.get('command')

    container_options = {
        'command': command,
        'tty': tty,
        'stdin_open': not detach,
        'detach': detach,
    }

    if options['-e']:
        container_options['environment'] = parse_environment(options['-e'])

    if options['--entrypoint']:
        container_options['entrypoint'] = options.get('--entrypoint')

    if options['--rm']:
        # A restart policy would conflict with removing the container later.
        container_options['restart'] = None

    if options['--user']:
        container_options['user'] = options.get('--user')

    if not options['--service-ports']:
        container_options['ports'] = []

    if options['--publish']:
        container_options['ports'] = options.get('--publish')

    if options['--publish'] and options['--service-ports']:
        # Fixed user-facing typo: "togather" -> "together".
        raise UserError(
            'Service port mapping and manual port mapping '
            'can not be used together'
        )

    if options['--name']:
        container_options['name'] = options['--name']

    try:
        container = service.create_container(
            quiet=True,
            one_off=True,
            **container_options
        )
    except APIError as e:
        # Creation can fail because pre-1.3 (unlabelled) containers exist;
        # this raises the more helpful legacy error in that case.
        legacy.check_for_legacy_containers(
            project.client,
            project.name,
            [service.name],
            allow_one_off=False,
        )
        raise e

    if detach:
        service.start_container(container)
        print(container.name)
    else:
        # Attach an interactive pseudo-terminal, then propagate the
        # container's exit code as our own.
        dockerpty.start(project.client, container.id, interactive=not options['-T'])
        exit_code = container.wait()
        if options['--rm']:
            project.client.remove_container(container.id)
        sys.exit(exit_code)
def scale(self, project, options):
    """
    Set number of containers to run for a service.

    Numbers are specified in the form `service=num` as arguments.
    For example:

        $ docker-compose scale web=2 worker=3

    Usage: scale [options] [SERVICE=NUM...]

    Options:
        -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
                                   (default: 10)
    """
    timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
    for pair in options['SERVICE=NUM']:
        if '=' not in pair:
            raise UserError('Arguments to scale should be in the form service=num')
        service_name, _, count_text = pair.partition('=')
        try:
            count = int(count_text)
        except ValueError:
            raise UserError('Number of containers for service "%s" is not a '
                            'number' % service_name)
        project.get_service(service_name).scale(count, timeout=timeout)
def start(self, project, options):
    """
    Start existing containers.

    Usage: start [SERVICE...]
    """
    project.start(service_names=options['SERVICE'])
def stop(self, project, options):
    """
    Stop running containers without removing them.

    They can be started again with `docker-compose start`.

    Usage: stop [options] [SERVICE...]

    Options:
        -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
                                   (default: 10)
    """
    timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
    project.stop(service_names=options['SERVICE'], timeout=timeout)
def restart(self, project, options):
    """
    Restart running containers.

    Usage: restart [options] [SERVICE...]

    Options:
        -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
                                   (default: 10)
    """
    timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
    project.restart(service_names=options['SERVICE'], timeout=timeout)
def unpause(self, project, options):
    """
    Unpause services.

    Usage: unpause [SERVICE...]
    """
    project.unpause(service_names=options['SERVICE'])
def up(self, project, options):
    """
    Builds, (re)creates, starts, and attaches to containers for a service.

    Unless they are already running, this command also starts any linked services.

    The `docker-compose up` command aggregates the output of each container. When
    the command exits, all containers are stopped. Running `docker-compose up -d`
    starts the containers in the background and leaves them running.

    If there are existing containers for a service, and the service's configuration
    or image was changed after the container's creation, `docker-compose up` picks
    up the changes by stopping and recreating the containers (preserving mounted
    volumes). To prevent Compose from picking up changes, use the `--no-recreate`
    flag.

    If you want to force Compose to stop and recreate all containers, use the
    `--force-recreate` flag.

    Usage: up [options] [SERVICE...]

    Options:
        --allow-insecure-ssl   Deprecated - no effect.
        -d                     Detached mode: Run containers in the background,
                               print new container names.
        --no-color             Produce monochrome output.
        --no-deps              Don't start linked services.
        --force-recreate       Recreate containers even if their configuration and
                               image haven't changed. Incompatible with --no-recreate.
        --no-recreate          If containers already exist, don't recreate them.
                               Incompatible with --force-recreate.
        --no-build             Don't build an image, even if it's missing
        -t, --timeout TIMEOUT  Use this timeout in seconds for container shutdown
                               when attached or when containers are already
                               running. (default: 10)
        -v VERSION             Put a version, latest by default
    """
    # Flag kept only for backwards compatibility: warn and ignore it.
    if options['--allow-insecure-ssl']:
        log.warn(INSECURE_SSL_WARNING)
    monochrome = options['--no-color']
    start_deps = not options['--no-deps']
    service_names = options['SERVICE']
    timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
    version = options.get('-v', None)
    to_attach = project.up(
        service_names=service_names,
        start_deps=start_deps,
        strategy=convergence_strategy_from_opts(options),
        do_build=not options['--no-build'],
        timeout=timeout,
        version=version
    )
    # In attached mode, stream container logs until interrupted, then
    # stop the project gracefully.
    if not options['-d']:
        log_printer = build_log_printer(to_attach, service_names, monochrome)
        attach_to_logs(project, log_printer, service_names, timeout)
def migrate_to_labels(self, project, _options):
    """
    Recreate containers to add labels

    If you're coming from Compose 1.2 or earlier, you'll need to remove or
    migrate your existing containers after upgrading Compose. This is
    because, as of version 1.3, Compose uses Docker labels to keep track
    of containers, and so they need to be recreated with labels added.

    If Compose detects containers that were created without labels, it
    will refuse to run so that you don't end up with two sets of them. If
    you want to keep using your existing containers (for example, because
    they have data volumes you want to preserve) you can migrate them with
    the following command:

        docker-compose migrate-to-labels

    Alternatively, if you're not worried about keeping them, you can
    remove them - Compose will just create new ones.

        docker rm -f myapp_web_1 myapp_db_1 ...

    Usage: migrate-to-labels
    """
    legacy.migrate_project_to_labels(project)
def version(self, project, options):
    """
    Show version information

    Usage: version [--short]

    Options:
        --short     Shows only Compose's version number.
    """
    # Fixed user-visible help typo: "informations" -> "information".
    # --short prints the bare version number; otherwise the full info block.
    if options['--short']:
        print(__version__)
    else:
        print(get_version_info('full'))
def convergence_strategy_from_opts(options):
    """Map the --force-recreate/--no-recreate flags to a ConvergenceStrategy.

    Raises UserError when both mutually-exclusive flags are given.
    """
    force = options['--force-recreate']
    skip = options['--no-recreate']
    if force and skip:
        raise UserError("--force-recreate and --no-recreate cannot be combined.")
    if force:
        return ConvergenceStrategy.always
    return ConvergenceStrategy.never if skip else ConvergenceStrategy.changed
def build_log_printer(containers, service_names, monochrome):
    """Create a LogPrinter for the containers of the requested services.

    When *service_names* is empty, all containers are attached.
    """
    if service_names:
        containers = [each for each in containers if each.service in service_names]
    return LogPrinter(containers, monochrome=monochrome)
def attach_to_logs(project, log_printer, service_names, timeout):
    # Stream container logs until the printer stops (containers exiting or
    # a first Ctrl+C). The finally block then installs a handler so a
    # SECOND Ctrl+C kills the services immediately, while the normal path
    # stops them gracefully with the given timeout.
    print("Attaching to", list_containers(log_printer.containers))
    try:
        log_printer.run()
    finally:
        def handler(signal, frame):
            # NOTE: the `signal` parameter shadows the stdlib module
            # inside this handler only.
            project.kill(service_names=service_names)
            sys.exit(0)
        signal.signal(signal.SIGINT, handler)
        print("Gracefully stopping... (press Ctrl+C again to force)")
        project.stop(service_names=service_names, timeout=timeout)
def list_containers(containers):
    """Render the containers' names as one comma-separated string."""
    names = (container.name for container in containers)
    return ", ".join(names)
| |
from unittest import TestCase
from board import Board
from util.input_parser import parse
from util.enums import Player, Side
from piece import Pawn, Queen, Rook, Bishop, Knight, King
from board.location import Location
class TestBoard(TestCase):
    """Integration tests for Board: mate, draw, castling, en passant,
    promotion, scoring, and rendering.

    Fixes applied: batch_move() previously parsed moves against its
    `board` argument but always executed them on self.board, silently
    ignoring the parameter; it now moves on the board it was given.
    Deprecated `assertEquals` aliases replaced with `assertEqual`.
    """

    def setUp(self):
        # board: a full standard setup; board2: an empty board with only
        # the two kings, for hand-built positions.
        self.board = Board()
        self.board2 = Board(False)
        King(Location(8, 'e'), Player.BLACK, self.board2)
        King(Location(1, 'e'), Player.WHITE, self.board2)

    def batch_move(self, board, moves):
        """Parse and play each move string on *board*."""
        for move in moves:
            move = parse(board, move)
            board.move(move)  # fixed: was self.board.move(move)

    def test_two_move_checkmate(self):
        # Fool's mate.
        moves = [
            'f2 f3',
            'e7 e5',
            'g2 g4',
            'd8 h4',
        ]
        self.batch_move(self.board, moves)
        self.assertTrue(self.board.checkmate())

    def test_draw(self):
        # Stalemate sequence: black to move with no legal moves, not in check.
        moves = [
            'c2 c4',
            'h7 h5',
            'h2 h4',
            'a7 a5',
            'd1 a4',
            'a8 a6',
            'a4 a5',
            'a6 h6',
            'a5 c7',
            'f7 f6',
            'c7 d7',
            'e8 f7',
            'd7 b7',
            'd8 d3',
            'b7 b8',
            'd3 h7',
            'b8 c8',
            'f7 g6',
            'c8 e6',
        ]
        self.assertFalse(self.board.draw())
        self.batch_move(self.board, moves)
        self.assertTrue(self.board.draw())
        self.assertFalse(self.board.checkmate())

    def test_en_passant(self):
        moves = [
            'a2 a4',
            'a7 a6',
            'a4 a5',
            'b7 b5',
        ]
        self.batch_move(self.board, moves)
        move = parse(self.board, 'a5 b6')  # white en passant attack
        self.assertTrue(move.en_passant)
        self.assertTrue(move in self.board.valid_moves())
        self.assertEqual(self.board.score(Player.WHITE), 0)
        self.board.move(move)
        self.assertEqual(self.board.score(Player.WHITE), Pawn.VALUE)
        # Undo must restore the captured pawn and the score.
        self.board.undo_move()
        self.assertEqual(self.board.score(Player.WHITE), 0)

    def test_king_side_castle(self):
        moves = [
            'g1 h3',
            'a7 a6',
            'g2 g4',
            'b7 b6',
            'f1 g2',
            'c7 c6',
        ]
        self.batch_move(self.board, moves)
        move = parse(self.board, 'e1 g1')  # white
        self.assertTrue(move.castle)
        self.assertEqual(move.castle_side, Side.KING)
        self.assertEqual(str(move), 'O-O')
        valid_moves = list(self.board.valid_moves())
        self.assertTrue(move in valid_moves)
        self.board.move(move)

    def test_queen_side_castle(self):
        moves = [
            'b1 a3',
            'a7 a6',
            'b2 b4',
            'b7 b6',
            'c1 b2',
            'c7 c6',
            'c2 c4',
            'd7 d6',
            'd1 c2',
            'e7 e6',
        ]
        self.batch_move(self.board, moves)
        move = parse(self.board, 'e1 c1')  # white
        self.assertTrue(move.castle)
        self.assertTrue(move in self.board.valid_moves())
        self.assertEqual(move.castle_side, Side.QUEEN)
        self.assertEqual(str(move), 'O-O-O')
        self.board.move(move)

    def test_king_side_castle_check_in_middle(self):
        # Castling through an attacked square must be rejected by the parser.
        Rook(Location(1, 'h'), Player.WHITE, self.board2)
        Queen(Location(2, 'f'), Player.BLACK, self.board2)
        with self.assertRaises(IOError):
            parse(self.board2, 'e1 g1')

    def test_queen_side_castle_check_in_middle(self):
        Rook(Location(1, 'a'), Player.WHITE, self.board2)
        Queen(Location(2, 'd'), Player.BLACK, self.board2)
        with self.assertRaises(IOError):
            parse(self.board2, 'e1 c1')

    def test_promotion(self):
        moves = [
            'a2 a4',
            'b7 b5',
            'a4 b5',
            'h7 h6',
            'b5 b6',
            'g7 g6',
            'b6 a7',
            'f7 f6',
        ]
        self.batch_move(self.board, moves)
        # Promotion requires an explicit piece letter.
        with self.assertRaises(IOError):
            parse(self.board, 'a7 b8')
        move = parse(self.board, 'a7 b8 q')
        self.assertTrue(move.promotion)
        self.assertEqual(move.promotion_piece_class, Queen)
        move = parse(self.board, 'a7 b8 r')
        self.assertTrue(move.promotion)
        self.assertEqual(move.promotion_piece_class, Rook)
        move = parse(self.board, 'a7 b8 b')
        self.assertTrue(move.promotion)
        self.assertEqual(move.promotion_piece_class, Bishop)
        move = parse(self.board, 'a7 b8 n')
        self.assertTrue(move.promotion)
        self.assertEqual(move.promotion_piece_class, Knight)
        self.board.move(move)
        self.assertEqual(self.board.piece(move.new_location).__class__, Knight)
        # Undo must restore the pawn and remove the promoted piece.
        self.board.undo_move()
        p1 = self.board.piece(move.old_location)
        p2 = self.board.piece(move.new_location, move.piece.player)
        self.assertEqual(p1.__class__, Pawn)
        self.assertEqual(p2, None)

    def test_random_game(self):
        # Play up to 10 random plies; just checks nothing blows up and the
        # move history length tracks the number of moves played.
        for i in range(10):
            check, draw, checkmate = self.board.status()
            if checkmate or draw:
                break
            self.assertEqual(len(self.board._moves), i)
            move = self.board.random_move()
            self.board.move(move)

    def test_score(self):
        self.assertEqual(self.board2.score(Player.BLACK), 0)
        p = Pawn(Location(2, 'a'), Player.WHITE, self.board2)
        self.board2.capture_piece(p)
        self.assertEqual(self.board2.score(Player.BLACK), Pawn.VALUE)
        k = Knight(Location(3, 'a'), Player.WHITE, self.board2)
        self.board2.capture_piece(k)
        self.assertEqual(self.board2.score(Player.BLACK), Pawn.VALUE + Knight.VALUE)

    def test_score_not_captured(self):
        # score(player, False) totals material still on the board.
        self.assertEqual(self.board2.score(Player.WHITE, False), 0)
        p = Pawn(Location(2, 'a'), Player.WHITE, self.board2)
        self.assertEqual(self.board2.score(Player.WHITE, False), Pawn.VALUE)
        k = Knight(Location(3, 'a'), Player.WHITE, self.board2)
        self.assertEqual(self.board2.score(Player.WHITE, False), Pawn.VALUE + Knight.VALUE)

    def test_last_move(self):
        self.assertEqual(self.board2.last_move(), None)

    def test_pieces(self):
        # pieces() supports filtering by class and/or player.
        all_pieces = list(self.board.pieces())
        self.assertEqual(all_pieces, self.board._pieces)
        kings = list(self.board.pieces(piece_class=King))
        self.assertTrue(all([king.__class__ is King for king in kings]))
        white = list(self.board.pieces(player=Player.WHITE))
        self.assertTrue(all([p.player is Player.WHITE for p in white]))
        white_knights = list(self.board.pieces(piece_class=Knight, player=Player.WHITE))
        self.assertTrue(all([p.player is Player.WHITE and p.__class__ is Knight for p in white_knights]))

    def test_str(self):
        # ASCII rendering of the initial position (unicode glyphs disabled).
        s = "-----------------------------------\n" \
            "8 | R | N | B | Q | K | B | N | R |\n" \
            "-----------------------------------\n" \
            "7 | P | P | P | P | P | P | P | P |\n" \
            "-----------------------------------\n" \
            "6 |   |   |   |   |   |   |   |   |\n" \
            "-----------------------------------\n" \
            "5 |   |   |   |   |   |   |   |   |\n" \
            "-----------------------------------\n" \
            "4 |   |   |   |   |   |   |   |   |\n" \
            "-----------------------------------\n" \
            "3 |   |   |   |   |   |   |   |   |\n" \
            "-----------------------------------\n" \
            "2 | p | p | p | p | p | p | p | p |\n" \
            "-----------------------------------\n" \
            "1 | r | n | b | q | k | b | n | r |\n" \
            "-----------------------------------\n" \
            "  | a | b | c | d | e | f | g | h |\n"
        self.board.character_map.unicode = False
        self.assertEqual(str(self.board), s)
| |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/CacheDir.py 4369 2009/09/19 15:58:29 scons"
__doc__ = """
CacheDir support
"""
import os.path
import stat
import string
import sys
import SCons.Action
# Module-level switches controlling CacheDir behavior; set from command-line
# options elsewhere in SCons. Semantics are defined where each is consumed.
cache_enabled = True
cache_debug = False
cache_force = False
cache_show = False
def CacheRetrieveFunc(target, source, env):
    """Action function: try to copy target[0] out of the derived-file cache.

    Returns 0 on success and 1 when the file is not present in the cache
    (signalling SCons to build the target normally).
    """
    t = target[0]
    fs = t.fs
    cd = env.get_CacheDir()
    cachedir, cachefile = cd.cachepath(t)
    if not fs.exists(cachefile):
        cd.CacheDebug('CacheRetrieve(%s): %s not in cache\n', t, cachefile)
        return 1
    cd.CacheDebug('CacheRetrieve(%s): retrieving from %s\n', t, cachefile)
    if SCons.Action.execute_actions:
        if fs.islink(cachefile):
            # Preserve symlinks as symlinks rather than copying the referent.
            fs.symlink(fs.readlink(cachefile), t.path)
        else:
            env.copy_from_cache(cachefile, t.path)
        # Match the cached file's mode bits, but ensure the result is writable.
        st = fs.stat(cachefile)
        fs.chmod(t.path, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
    return 0
def CacheRetrieveString(target, source, env):
    """Action-string function: describe a cache retrieval of target[0].

    Returns the message to print when the file is in the cache, or None
    (print nothing special) when it is not.
    """
    node = target[0]
    filesystem = node.fs
    cache = env.get_CacheDir()
    _cachedir, cachefile = cache.cachepath(node)
    if filesystem.exists(cachefile):
        return "Retrieved `%s' from cache" % node.path
    return None
# Bind the retrieve behavior to Action objects: one that prints the
# retrieval message, and a silent variant with no action string.
CacheRetrieve = SCons.Action.Action(CacheRetrieveFunc, CacheRetrieveString)
CacheRetrieveSilent = SCons.Action.Action(CacheRetrieveFunc, None)
def CachePushFunc(target, source, env):
t = target[0]
if t.nocache:
return
fs = t.fs
cd = env.get_CacheDir()
cachedir, cachefile = cd.cachepath(t)
if fs.exists(cachefile):
# Don't bother copying it if it's already there. Note that
# usually this "shouldn't happen" because if the file already
# existed in cache, we'd have retrieved the file from there,
# not built it. This can happen, though, in a race, if some
# other person running the same build pushes their copy to
# the cache after we decide we need to build it but before our
# build completes.
cd.CacheDebug('CachePush(%s): %s already exists in cache\n', t, cachefile)
return
cd.CacheDebug('CachePush(%s): pushing to %s\n', t, cachefile)
tempfile = cachefile+'.tmp'+str(os.getpid())
errfmt = "Unable to copy %s to cache. Cache file is %s"
if not fs.isdir(cachedir):
try:
fs.makedirs(cachedir)
except EnvironmentError:
# We may have received an exception because another process
# has beaten us creating the directory.
if not fs.isdir(cachedir):
msg = errfmt % (str(target), cachefile)
raise SCons.Errors.EnvironmentError, msg
try:
if fs.islink(t.path):
fs.symlink(fs.readlink(t.path), tempfile)
else:
fs.copy2(t.path, tempfile)
fs.rename(tempfile, cachefile)
st = fs.stat(t.path)
fs.chmod(cachefile, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
except EnvironmentError:
# It's possible someone else tried writing the file at the
# same time we did, or else that there was some problem like
# the CacheDir being on a separate file system that's full.
# In any case, inability to push a file to cache doesn't affect
# the correctness of the build, so just print a warning.
msg = errfmt % (str(target), cachefile)
SCons.Warnings.warn(SCons.Warnings.CacheWriteErrorWarning, msg)
CachePush = SCons.Action.Action(CachePushFunc, None)
class CacheDir:
    """Manages a derived-file cache directory.

    Cache entries live under self.path, partitioned into subdirectories
    named after the upper-cased first character of each entry's build
    signature.
    """

    def __init__(self, path):
        """Record the cache directory path.

        If no hash module is importable, caching is disabled by setting
        self.path to None and a NoMD5ModuleWarning is issued.
        """
        try:
            import hashlib
        except ImportError:
            msg = "No hashlib or MD5 module available, CacheDir() not supported"
            SCons.Warnings.warn(SCons.Warnings.NoMD5ModuleWarning, msg)
            self.path = None
        else:
            self.path = path
        self.current_cache_debug = None
        self.debugFP = None

    def CacheDebug(self, fmt, target, cachefile):
        """Write one cache-debug trace line if debugging is enabled."""
        if cache_debug != self.current_cache_debug:
            # The cache_debug setting changed since the last call:
            # (re)select the destination ('-' means stdout, any other
            # truthy string is a log file name).
            if cache_debug == '-':
                self.debugFP = sys.stdout
            elif cache_debug:
                self.debugFP = open(cache_debug, 'w')
            else:
                self.debugFP = None
            self.current_cache_debug = cache_debug
        if self.debugFP:
            self.debugFP.write(fmt % (target, os.path.split(cachefile)[1]))

    def is_enabled(self):
        """Return True if caching is globally enabled and a path is set."""
        return cache_enabled and self.path is not None

    def cachepath(self, node):
        """Return the (directory, filename) cache path pair for node.

        Returns (None, None) when caching is disabled.
        """
        if not self.is_enabled():
            return None, None
        sig = node.get_cachedir_bsig()
        # string.upper() was removed in Python 3; the equivalent str
        # method works on both Python 2 and 3.
        subdir = sig[0].upper()
        directory = os.path.join(self.path, subdir)
        return directory, os.path.join(directory, sig)

    def retrieve(self, node):
        """
        This method is called from multiple threads in a parallel build,
        so only do thread safe stuff here. Do thread unsafe stuff in
        built().

        Note that there's a special trick here with the execute flag
        (one that's not normally done for other actions). Basically
        if the user requested a no_exec (-n) build, then
        SCons.Action.execute_actions is set to 0 and when any action
        is called, it does its showing but then just returns zero
        instead of actually calling the action execution operation.
        The problem for caching is that if the file does NOT exist in
        cache then the CacheRetrieveString won't return anything to
        show for the task, but the Action.__call__ won't call
        CacheRetrieveFunc; instead it just returns zero, which makes
        the code below think that the file *was* successfully
        retrieved from the cache, therefore it doesn't do any
        subsequent building. However, the CacheRetrieveString didn't
        print anything because it didn't actually exist in the cache,
        and no more build actions will be performed, so the user just
        sees nothing. The fix is to tell Action.__call__ to always
        execute the CacheRetrieveFunc and then have the latter
        explicitly check SCons.Action.execute_actions itself.
        """
        if not self.is_enabled():
            return False

        env = node.get_build_env()
        if cache_show:
            # --cache-show style behaviour: retrieve silently, then show
            # the build line as if the target had been built.
            if CacheRetrieveSilent(node, [], env, execute=1) == 0:
                node.build(presub=0, execute=0)
                return True
        else:
            if CacheRetrieve(node, [], env, execute=1) == 0:
                return True
        return False

    def push(self, node):
        """Push node's built file into the cache (no-op when disabled)."""
        if not self.is_enabled():
            return
        return CachePush(node, [], node.get_build_env())

    def push_if_forced(self, node):
        """Push node only when the module-level cache_force flag is set."""
        if cache_force:
            return self.push(node)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
#!/usr/bin/python
# (c) 2016, Marcin Skarbek <github@skarbek.name>
# (c) 2016, Andreas Olsson <andreas@arrakis.se>
# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
#
# This module was ported from https://github.com/mskarbek/ansible-nsupdate
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nsupdate
short_description: Manage DNS records.
description:
- Create, update and remove DNS records using DDNS updates
version_added: "2.3"
requirements:
- dnspython
author: "Loic Blot (@nerzhul)"
options:
state:
description:
- Manage DNS record.
choices: ['present', 'absent']
default: 'present'
server:
description:
- Apply DNS modification on this server.
required: true
port:
description:
- Use this TCP port when connecting to C(server).
default: 53
version_added: 2.5
key_name:
description:
- Use TSIG key name to authenticate against DNS C(server)
key_secret:
description:
- Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
key_algorithm:
description:
- Specify key algorithm used by C(key_secret).
choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
'hmac-sha512']
default: 'hmac-md5'
zone:
description:
- DNS record will be modified on this C(zone).
- When omitted DNS will be queried to attempt finding the correct zone.
- Starting with Ansible 2.7 this parameter is optional.
record:
description:
- Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
required: true
type:
description:
- Sets the record type.
default: 'A'
ttl:
description:
- Sets the record TTL.
default: 3600
value:
description:
- Sets the record value.
protocol:
description:
- Sets the transport protocol (TCP or UDP). TCP is the recommended and a more robust option.
default: 'tcp'
choices: ['tcp', 'udp']
version_added: 2.8
'''
EXAMPLES = '''
- name: Add or modify ansible.example.org A to 192.168.1.1"
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "ansible"
value: "192.168.1.1"
- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3"
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "ansible"
value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
- name: Remove puppet.example.org CNAME
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
zone: "example.org"
record: "puppet"
type: "CNAME"
state: absent
- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
record: "1.1.168.192.in-addr.arpa."
type: "PTR"
value: "ansible.example.org."
state: present
- name: Remove 1.1.168.192.in-addr.arpa. PTR
nsupdate:
key_name: "nsupdate"
key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
server: "10.1.1.1"
record: "1.1.168.192.in-addr.arpa."
type: "PTR"
state: absent
'''
RETURN = '''
changed:
description: If module has modified record
returned: success
type: str
record:
description: DNS record
returned: success
type: str
sample: 'ansible'
ttl:
description: DNS record TTL
returned: success
type: int
sample: 86400
type:
description: DNS record type
returned: success
type: str
sample: 'CNAME'
value:
description: DNS record value(s)
returned: success
type: list
sample: '192.168.1.1'
zone:
description: DNS record zone
returned: success
type: str
sample: 'example.org.'
dns_rc:
description: dnspython return code
returned: always
type: int
sample: 4
dns_rc_str:
description: dnspython return code (string representation)
returned: always
type: str
sample: 'REFUSED'
'''
import traceback
from binascii import Error as binascii_error
from socket import error as socket_error
# dnspython is an optional third-party dependency: remember the import
# failure traceback so main() can report it via missing_required_lib()
# instead of crashing at import time.
DNSPYTHON_IMP_ERR = None
try:
    import dns.update
    import dns.query
    import dns.tsigkeyring
    import dns.message
    import dns.resolver

    HAVE_DNSPYTHON = True
except ImportError:
    DNSPYTHON_IMP_ERR = traceback.format_exc()
    HAVE_DNSPYTHON = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class RecordManager(object):
    """Performs dynamic DNS (RFC 2136) lookups and updates for one record."""

    def __init__(self, module):
        self.module = module
        if module.params['zone'] is None:
            # Without an explicit zone the record must be absolute so we
            # can walk the name hierarchy to discover the zone ourselves.
            if module.params['record'][-1] != '.':
                self.module.fail_json(msg='record must be absolute when omitting zone parameter')
            self.zone = self.lookup_zone()
        else:
            self.zone = module.params['zone']

            # Normalize the user-supplied zone to an absolute name.
            if self.zone[-1] != '.':
                self.zone += '.'

        # Build the fully qualified record name used for TTL lookups.
        if module.params['record'][-1] != '.':
            self.fqdn = module.params['record'] + '.' + self.zone
        else:
            self.fqdn = module.params['record']

        if module.params['key_name']:
            try:
                self.keyring = dns.tsigkeyring.from_text({
                    module.params['key_name']: module.params['key_secret']
                })
            except TypeError:
                module.fail_json(msg='Missing key_secret')
            except binascii_error as e:
                module.fail_json(msg='TSIG key error: %s' % to_native(e))
        else:
            self.keyring = None

        if module.params['key_algorithm'] == 'hmac-md5':
            # dnspython expects the full algorithm identifier.
            self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
        else:
            self.algorithm = module.params['key_algorithm']

        if self.module.params['type'].lower() == 'txt':
            self.value = list(map(self.txt_helper, self.module.params['value']))
        else:
            self.value = self.module.params['value']

        self.dns_rc = 0

    def txt_helper(self, entry):
        """Return entry quoted for a TXT record, leaving quoted values alone."""
        # startswith/endswith instead of entry[0]/entry[-1]: avoids an
        # IndexError when an empty string is supplied as a TXT value
        # (an empty value is simply wrapped as "").
        if entry.startswith('"') and entry.endswith('"'):
            return entry
        return '"{text}"'.format(text=entry)

    def lookup_zone(self):
        """Find the zone for the record by walking up the name hierarchy,
        querying the target server for SOA records until it answers
        authoritatively for the name itself."""
        name = dns.name.from_text(self.module.params['record'])
        while True:
            query = dns.message.make_query(name, dns.rdatatype.SOA)
            try:
                if self.module.params['protocol'] == 'tcp':
                    lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
                else:
                    lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
            except (socket_error, dns.exception.Timeout) as e:
                self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
            if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
                self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
                    self.module.params['server'], self.module.params['record']))
            try:
                zone = lookup.authority[0].name
                if zone == name:
                    return zone.to_text()
            except IndexError:
                pass
            try:
                name = name.parent()
            except dns.name.NoParent:
                self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record']))

    def __do_update(self, update):
        """Send a prepared dns.update.Update to the server, failing the
        module on transport or TSIG errors."""
        response = None
        try:
            if self.module.params['protocol'] == 'tcp':
                response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
            else:
                response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
        except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
            self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
        except (socket_error, dns.exception.Timeout) as e:
            self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
        return response

    def create_or_update_record(self):
        """Handle state=present: create a missing record or modify a
        mismatching one; return the module result dict."""
        result = {'changed': False, 'failed': False}
        exists = self.record_exists()
        if exists in [0, 2]:
            if self.module.check_mode:
                self.module.exit_json(changed=True)

            if exists == 0:
                self.dns_rc = self.create_record()
                if self.dns_rc != 0:
                    result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
            elif exists == 2:
                self.dns_rc = self.modify_record()
                if self.dns_rc != 0:
                    result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc

            if self.dns_rc != 0:
                result['failed'] = True
            else:
                result['changed'] = True
        else:
            result['changed'] = False
        return result

    def create_record(self):
        """Add the record's value(s); return the dnspython rcode."""
        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        for entry in self.value:
            try:
                update.add(self.module.params['record'],
                           self.module.params['ttl'],
                           self.module.params['type'],
                           entry)
            except AttributeError:
                # self.value was None: iterating/adding raises AttributeError.
                self.module.fail_json(msg='value needed when state=present')
            except dns.exception.SyntaxError:
                self.module.fail_json(msg='Invalid/malformed value')
        response = self.__do_update(update)
        return dns.message.Message.rcode(response)

    def modify_record(self):
        """Replace the record (delete then re-add); return the rcode."""
        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        update.delete(self.module.params['record'], self.module.params['type'])
        for entry in self.value:
            try:
                update.add(self.module.params['record'],
                           self.module.params['ttl'],
                           self.module.params['type'],
                           entry)
            except AttributeError:
                self.module.fail_json(msg='value needed when state=present')
            except dns.exception.SyntaxError:
                self.module.fail_json(msg='Invalid/malformed value')
        response = self.__do_update(update)
        return dns.message.Message.rcode(response)

    def remove_record(self):
        """Handle state=absent; return the module result dict."""
        result = {'changed': False, 'failed': False}
        if self.record_exists() == 0:
            return result

        # Check mode and record exists, declared fake change.
        if self.module.check_mode:
            self.module.exit_json(changed=True)

        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        update.delete(self.module.params['record'], self.module.params['type'])
        response = self.__do_update(update)
        self.dns_rc = dns.message.Message.rcode(response)
        if self.dns_rc != 0:
            result['failed'] = True
            result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
        else:
            result['changed'] = True
        return result

    def record_exists(self):
        """Probe the server with prerequisite-only updates.

        Returns 0 when the record does not exist, 1 when it exists and
        matches (or simply exists, for state=absent), and 2 when it exists
        but its values or TTL differ from the requested ones.
        """
        update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
        try:
            update.present(self.module.params['record'], self.module.params['type'])
        except dns.rdatatype.UnknownRdatatype as e:
            self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))

        response = self.__do_update(update)
        self.dns_rc = dns.message.Message.rcode(response)
        if self.dns_rc == 0:
            if self.module.params['state'] == 'absent':
                return 1
            for entry in self.value:
                try:
                    update.present(self.module.params['record'], self.module.params['type'], entry)
                except AttributeError:
                    self.module.fail_json(msg='value needed when state=present')
                except dns.exception.SyntaxError:
                    self.module.fail_json(msg='Invalid/malformed value')
            response = self.__do_update(update)
            self.dns_rc = dns.message.Message.rcode(response)
            if self.dns_rc == 0:
                if self.ttl_changed():
                    return 2
                else:
                    return 1
            else:
                return 2
        else:
            return 0

    def ttl_changed(self):
        """Return True if the record's live TTL differs from the requested one."""
        query = dns.message.make_query(self.fqdn, self.module.params['type'])
        try:
            if self.module.params['protocol'] == 'tcp':
                lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
            else:
                lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
        except (socket_error, dns.exception.Timeout) as e:
            self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
        current_ttl = lookup.answer[0].ttl
        return current_ttl != self.module.params['ttl']
def main():
    """Module entry point: parse arguments and apply the requested DNS change."""
    tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
                 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
            server=dict(required=True, type='str'),
            port=dict(required=False, default=53, type='int'),
            key_name=dict(required=False, type='str'),
            key_secret=dict(required=False, type='str', no_log=True),
            key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
            zone=dict(required=False, default=None, type='str'),
            record=dict(required=True, type='str'),
            type=dict(required=False, default='A', type='str'),
            ttl=dict(required=False, default=3600, type='int'),
            value=dict(required=False, default=None, type='list'),
            protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
        ),
        supports_check_mode=True
    )
    # dnspython is optional at import time; bail out with a helpful message.
    if not HAVE_DNSPYTHON:
        module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR)

    if len(module.params["record"]) == 0:
        module.fail_json(msg='record cannot be empty.')

    record = RecordManager(module)
    result = {}
    if module.params["state"] == 'absent':
        result = record.remove_record()
    elif module.params["state"] == 'present':
        result = record.create_or_update_record()

    # Always report the last dnspython return code, numerically and as text.
    result['dns_rc'] = record.dns_rc
    result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)

    if result['failed']:
        module.fail_json(**result)
    else:
        result['record'] = dict(zone=record.zone,
                                record=module.params['record'],
                                type=module.params['type'],
                                ttl=module.params['ttl'],
                                value=record.value)
        module.exit_json(**result)


if __name__ == '__main__':
    main()
| |
import re
from collections import defaultdict
from .base_section import BaseSection
# ProfileCollection is optional: when it is unavailable we fall back to
# None, and ConnectionSection.run() skips its isinstance() check.
try:
    from mtools.util.profile_collection import ProfileCollection
except ImportError:
    ProfileCollection = None
class ConnectionSection(BaseSection):
    """
    ConnectionSection class.

    This section goes through the logfile and extracts information about
    opened and closed connections.
    """

    name = "connections"

    def __init__(self, mloginfo):
        """Register the --connections / --connstats command line flags."""
        BaseSection.__init__(self, mloginfo)

        helptext = 'outputs information about opened and closed connections'
        self.mloginfo.argparser_sectiongroup.add_argument('--connections',
                                                          action='store_true',
                                                          help=helptext)
        helptext = ('outputs helpful statistics for connection '
                    'duration (min/max/avg)')
        self.mloginfo.argparser_sectiongroup.add_argument('--connstats',
                                                          action='store_true',
                                                          help=helptext)

    @property
    def active(self):
        """Return boolean if this section is active."""
        return (self.mloginfo.args['connections'] or
                self.mloginfo.args['connstats'])

    def run(self):
        """Run this section and print out information."""
        if ProfileCollection and isinstance(self.mloginfo.logfile,
                                            ProfileCollection):
            print("\n not available for system.profile collections\n")
            return

        ip_opened = defaultdict(lambda: 0)
        ip_closed = defaultdict(lambda: 0)
        socket_exceptions = 0

        # Sentinel values for the per-connection bookkeeping below.
        START_TIME_EMPTY = -11
        END_TIME_ALREADY_FOUND = -111
        MIN_DURATION_EMPTY = 9999999999
        MAX_DURATION_EMPTY = -1

        # BUG FIX: the flags must be passed to re.compile(). The original
        # code passed re.M | re.I as the second positional argument of the
        # *compiled* pattern's search(), where it is interpreted as the
        # start position `pos` (== 10), not as flags, so matches of
        # "[connN]" near the start of the line were silently missed.
        end_connid_pattern = re.compile(r'\[conn(\d+)\]', re.M | re.I)

        genstats = self.mloginfo.args['connstats']
        if genstats:
            connections_start = defaultdict(lambda: START_TIME_EMPTY)
            ipwise_sum_durations = defaultdict(lambda: 0)
            ipwise_count = defaultdict(lambda: 0)
            ipwise_min_connection_duration = defaultdict(lambda:
                                                         MIN_DURATION_EMPTY)
            ipwise_max_connection_duration = defaultdict(lambda:
                                                         MAX_DURATION_EMPTY)
            min_connection_duration = MIN_DURATION_EMPTY
            max_connection_duration = MAX_DURATION_EMPTY
            sum_durations = 0
            fullconn_counts = 0

        for logevent in self.mloginfo.logfile:
            line = logevent.line_str

            pos = line.find('connection accepted')
            if pos != -1:
                # connection was opened, increase counter
                tokens = line[pos:pos + 100].split(' ')
                if tokens[3] == 'anonymous':
                    ip = 'anonymous'
                else:
                    ip, _ = tokens[3].split(':')
                ip_opened[ip] += 1

                if genstats:
                    # The connection id follows the ip:port token, e.g. "#385".
                    connid = tokens[4].strip('#')
                    dt = logevent.datetime

                    # Sanity checks
                    if connid.isdigit() is False or dt is None:
                        continue

                    if connections_start[connid] != START_TIME_EMPTY:
                        errmsg = ("Multiple start datetimes found for the "
                                  "same connection ID. Consider analysing one "
                                  "log sequence.")
                        raise NotImplementedError(errmsg)

                    connections_start[connid] = dt

            pos = line.find('end connection')
            if pos != -1:
                # connection was closed, increase counter
                tokens = line[pos:pos + 100].split(' ')
                if tokens[2] == 'anonymous':
                    ip = 'anonymous'
                else:
                    ip, _ = tokens[2].split(':')
                ip_closed[ip] += 1

                if genstats:
                    # The connection id value is stored just before end
                    # connection -> [conn385] end connection
                    # (single search; the original searched the line twice)
                    match = end_connid_pattern.search(line)

                    # Sanity check
                    if match is None:
                        continue
                    end_connid = match.group(1)
                    dt = logevent.datetime

                    # Sanity checks
                    if (end_connid.isdigit() is False or dt is None or
                            connections_start[end_connid] == START_TIME_EMPTY):
                        continue

                    if connections_start[end_connid] == END_TIME_ALREADY_FOUND:
                        errmsg = ("Multiple end datetimes found for the same "
                                  "connection ID %s. Consider analysing one "
                                  "log sequence.")
                        raise NotImplementedError(errmsg % (end_connid))

                    dur = dt - connections_start[end_connid]
                    # NOTE(review): .seconds wraps for durations >= 1 day;
                    # total_seconds() may be intended -- confirm upstream.
                    dur_in_sec = dur.seconds

                    if dur_in_sec < min_connection_duration:
                        min_connection_duration = dur_in_sec
                    if dur_in_sec > max_connection_duration:
                        max_connection_duration = dur_in_sec
                    if dur_in_sec < ipwise_min_connection_duration[ip]:
                        ipwise_min_connection_duration[ip] = dur_in_sec
                    if dur_in_sec > ipwise_max_connection_duration[ip]:
                        ipwise_max_connection_duration[ip] = dur_in_sec

                    sum_durations += dur.seconds
                    fullconn_counts += 1
                    ipwise_sum_durations[ip] += dur_in_sec
                    ipwise_count[ip] += 1
                    connections_start[end_connid] = END_TIME_ALREADY_FOUND

            if "SocketException" in line:
                socket_exceptions += 1

        # calculate totals
        total_opened = sum(ip_opened.values())
        total_closed = sum(ip_closed.values())

        unique_ips = set(ip_opened.keys())
        unique_ips.update(ip_closed.keys())

        # output statistics
        print("     total opened: %s" % total_opened)
        print("     total closed: %s" % total_closed)
        print("    no unique IPs: %s" % len(unique_ips))
        print("socket exceptions: %s" % socket_exceptions)
        if genstats:
            if fullconn_counts > 0:
                print("overall average connection duration(s): %s"
                      % (sum_durations / fullconn_counts))
                print("overall minimum connection duration(s): %s"
                      % min_connection_duration)
                print("overall maximum connection duration(s): %s"
                      % max_connection_duration)
            else:
                print("overall average connection duration(s): -")
                print("overall minimum connection duration(s): -")
                print("overall maximum connection duration(s): -")
        print('')

        for ip in sorted(unique_ips, key=lambda x: ip_opened[x], reverse=True):
            opened = ip_opened[ip] if ip in ip_opened else 0
            closed = ip_closed[ip] if ip in ip_closed else 0

            if genstats:
                covered_count = (
                    ipwise_count[ip]
                    if ip in ipwise_count
                    else 1)
                connection_duration_ip = (
                    ipwise_sum_durations[ip]
                    if ip in ipwise_sum_durations
                    else 0)
                ipwise_min_connection_duration_final = (
                    ipwise_min_connection_duration[ip]
                    if ipwise_min_connection_duration[ip] != MIN_DURATION_EMPTY
                    else 0)
                ipwise_max_connection_duration_final = (
                    ipwise_max_connection_duration[ip]
                    if ipwise_max_connection_duration[ip] != MAX_DURATION_EMPTY
                    else 0)

                print("%-15s  opened: %-8i  closed: %-8i dur-avg(s): %-8i "
                      "dur-min(s): %-8i dur-max(s): %-8i"
                      % (ip, opened, closed,
                         connection_duration_ip / covered_count,
                         ipwise_min_connection_duration_final,
                         ipwise_max_connection_duration_final))
            else:
                print("%-15s  opened: %-8i  closed: %-8i"
                      % (ip, opened, closed))

        print('')
| |
# -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from s3layouts import *
# Template-specific layouts are optional; when absent, the default
# layouts already pulled in via "from s3layouts import *" are used.
try:
    from .layouts import *
except ImportError:
    pass
import s3menus as default
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
    """
    Custom Controller Menus

    The options menu (left-hand options menu) is individual for each
    controller, so each controller has its own options menu function
    in this class.

    Each of these option menu functions can be customized separately,
    by simply overriding (re-defining) the default function. The
    options menu function must return an instance of the item layout.

    The standard menu uses the M item layout class, but you can of
    course also use any other layout class which you define in
    layouts.py (can also be mixed).

    Make sure additional helper functions in this class don't match
    any current or future controller prefix (e.g. by using an
    underscore prefix).
    """

    # -------------------------------------------------------------------------
    def hrm(self):
        """ HRM / Human Resources Management """

        s3 = current.session.s3
        ADMIN = s3.system_roles.ADMIN

        # Custom conditions for the check-hook, as lambdas in order
        # to have them checked only immediately before rendering:
        manager_mode = lambda i: s3.hrm.mode is None
        personal_mode = lambda i: s3.hrm.mode is not None
        # (orgs and True or X) evaluates to True when orgs is truthy,
        # otherwise to (ADMIN in s3.roles)
        is_org_admin = lambda i: s3.hrm.orgs and True or \
                                 ADMIN in s3.roles

        settings = current.deployment_settings
        teams = settings.get_hrm_teams()
        use_teams = lambda i: teams

        return M(c="hrm")(
                    M("Staff", f="staff",
                      check=manager_mode)(
                        M("New", m="create"),
                        M("List All"),
                        M("Search", m="search"),
                        M("Import", f="person", m="import",
                          vars={"group":"staff"}, p="create"),
                    ),
                    M(teams, f="group",
                      check=[manager_mode, use_teams])(
                        M("New", m="create"),
                        M("List All"),
                    ),
                    M("Job Title Catalog", f="job_title",
                      check=manager_mode)(
                        M("New", m="create"),
                        M("List All"),
                    ),
                    #M("Skill Catalog", f="skill",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    #M("Skill Provisions", f="skill_provision"),
                    #),
                    #M("Training Events", f="training_event",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    M("Search", m="search"),
                    #    M("Search Training Participants", f="training",
                    #      m="search"),
                    #    M("Import Participant List", f="training", m="import"),
                    #),
                    #M("Training Course Catalog", f="course",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    #M("Course Certificates", f="course_certificate"),
                    #),
                    #M("Certificate Catalog", f="certificate",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    #M("Skill Equivalence", f="certificate_skill"),
                    #),
                    M("Reports", f="staff", m="report",
                      check=manager_mode)(
                        M("Staff Report", m="report"),
                        M("Expiring Staff Contracts Report",
                          vars=dict(expiring=1)),
                        # M("Training Report", f="training", m="report"),
                    ),
                    M("Personal Profile", f="person",
                      check=personal_mode, vars=dict(mode="personal")),
                    # This provides the link to switch to the manager mode:
                    M("Staff Management", f="index",
                      check=[personal_mode, is_org_admin]),
                    # This provides the link to switch to the personal mode:
                    #M("Personal Profile", f="person",
                    #  check=manager_mode, vars=dict(mode="personal"))
                )

    # -------------------------------------------------------------------------
    def vol(self):
        """ Volunteer Management """

        s3 = current.session.s3
        ADMIN = s3.system_roles.ADMIN

        # Custom conditions for the check-hook, as lambdas in order
        # to have them checked only immediately before rendering:
        manager_mode = lambda i: s3.hrm.mode is None
        personal_mode = lambda i: s3.hrm.mode is not None
        is_org_admin = lambda i: s3.hrm.orgs and True or \
                                 ADMIN in s3.roles

        settings = current.deployment_settings
        # show_programmes is currently only referenced by the commented-out
        # "Programmes" menu block below; kept for when it is re-enabled.
        show_programmes = lambda i: settings.get_hrm_vol_experience() == "programme"
        show_tasks = lambda i: settings.has_module("project") and \
                               settings.get_project_mode_task()
        teams = settings.get_hrm_teams()
        use_teams = lambda i: teams

        return M(c="vol")(
                    M("Volunteers", f="volunteer",
                      check=[manager_mode])(
                        M("New", m="create"),
                        M("List All"),
                        M("Search", m="search"),
                        M("Import", f="person", m="import",
                          vars={"group":"volunteer"}, p="create"),
                    ),
                    M(teams, f="group",
                      check=[manager_mode, use_teams])(
                        M("New", m="create"),
                        M("List All"),
                    ),
                    M("Volunteer Role Catalog", f="job_title",
                      check=manager_mode)(
                        M("New", m="create"),
                        M("List All"),
                    ),
                    #M("Skill Catalog", f="skill",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    #M("Skill Provisions", f="skill_provision"),
                    #),
                    #M("Training Events", f="training_event",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    M("Search", m="search"),
                    #    M("Search Training Participants", f="training",
                    #      m="search"),
                    #    M("Import Participant List", f="training", m="import"),
                    #),
                    #M("Training Course Catalog", f="course",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    #M("Course Certificates", f="course_certificate"),
                    #),
                    #M("Certificate Catalog", f="certificate",
                    #  check=manager_mode)(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    #M("Skill Equivalence", f="certificate_skill"),
                    #),
                    #M("Programmes", f="programme",
                    #  check=[manager_mode, show_programmes])(
                    #    M("New", m="create"),
                    #    M("List All"),
                    #    M("Import Hours", f="programme_hours", m="import"),
                    #),
                    M("Reports", f="volunteer", m="report",
                      check=manager_mode)(
                        M("Volunteer Report", m="report"),
                        # M("Training Report", f="training", m="report"),
                    ),
                    M("My Profile", f="person",
                      check=personal_mode, vars=dict(mode="personal")),
                    M("My Tasks", f="task",
                      check=[personal_mode, show_tasks],
                      vars=dict(mode="personal",
                                mine=1)),
                    # This provides the link to switch to the manager mode:
                    M("Volunteer Management", f="index",
                      check=[personal_mode, is_org_admin]),
                    # This provides the link to switch to the personal mode:
                    #M("Personal Profile", f="person",
                    #  check=manager_mode, vars=dict(mode="personal"))
                )
# END =========================================================================
| |
import itertools
import warnings
from collections import Counter, OrderedDict
import pandas as pd
from . import utils
from .alignment import align
from .merge import merge
from .variable import IndexVariable, Variable, as_variable
from .variable import concat as concat_vars
from .computation import result_name
def concat(objs, dim=None, data_vars='all', coords='different',
           compat='equals', positions=None, indexers=None, mode=None,
           concat_over=None):
    """Concatenate xarray objects along a new or existing dimension.

    Parameters
    ----------
    objs : sequence of Dataset and DataArray objects
        xarray objects to concatenate together. Each object is expected to
        consist of variables and coordinates with matching shapes except for
        along the concatenated dimension.
    dim : str or DataArray or pandas.Index
        Name of the dimension to concatenate along. This can either be a new
        dimension name, in which case it is added along axis=0, or an existing
        dimension name, in which case the location of the dimension is
        unchanged. If dimension is provided as a DataArray or Index, its name
        is used as the dimension to concatenate along and the values are added
        as a coordinate.
    data_vars : {'minimal', 'different', 'all' or list of str}, optional
        These data variables will be concatenated together:
          * 'minimal': Only data variables in which the dimension already
            appears are included.
          * 'different': Data variables which are not equal (ignoring
            attributes) across all datasets are also concatenated (as well as
            all for which dimension already appears). Beware: this option may
            load the data payload of data variables into memory if they are not
            already loaded.
          * 'all': All data variables will be concatenated.
          * list of str: The listed data variables will be concatenated, in
            addition to the 'minimal' data variables.
        If objects are DataArrays, data_vars must be 'all'.
    coords : {'minimal', 'different', 'all' or list of str}, optional
        These coordinate variables will be concatenated together:
          * 'minimal': Only coordinates in which the dimension already appears
            are included.
          * 'different': Coordinates which are not equal (ignoring attributes)
            across all datasets are also concatenated (as well as all for which
            dimension already appears). Beware: this option may load the data
            payload of coordinate variables into memory if they are not already
            loaded.
          * 'all': All coordinate variables will be concatenated, except
            those corresponding to other dimensions.
          * list of str: The listed coordinate variables will be concatenated,
            in addition the 'minimal' coordinates.
    compat : {'equals', 'identical'}, optional
        String indicating how to compare non-concatenated variables and
        dataset global attributes for potential conflicts. 'equals' means
        that all variable values and dimensions must be the same;
        'identical' means that variable attributes and global attributes
        must also be equal.
    positions : None or list of integer arrays, optional
        List of integer arrays which specifies the integer positions to which
        to assign each dataset along the concatenated dimension. If not
        supplied, objects are concatenated in the provided order.
    indexers, mode, concat_over : deprecated

    Returns
    -------
    concatenated : type of objs

    See also
    --------
    merge
    auto_combine
    """
    # TODO: add join and ignore_index arguments copied from pandas.concat
    # TODO: support concatenating scalar coordinates even if the concatenated
    # dimension already exists
    from .dataset import Dataset
    from .dataarray import DataArray

    try:
        first_obj, objs = utils.peek_at(objs)
    except StopIteration:
        raise ValueError('must supply at least one object to concatenate')

    if dim is None:
        warnings.warn('the `dim` argument to `concat` will be required '
                      'in a future version of xarray; for now, setting it to '
                      "the old default of 'concat_dim'",
                      FutureWarning, stacklevel=2)
        # BUG FIX: use the name announced in the warning just above (and
        # used as the fallback in _calc_concat_dim_coord); this previously
        # set the misspelled 'concat_dims'.
        dim = 'concat_dim'

    if indexers is not None:  # pragma: nocover
        warnings.warn('indexers has been renamed to positions; the alias '
                      'will be removed in a future version of xarray',
                      FutureWarning, stacklevel=2)
        positions = indexers

    if mode is not None:
        raise ValueError('`mode` is no longer a valid argument to '
                         'xarray.concat; it has been split into the '
                         '`data_vars` and `coords` arguments')
    if concat_over is not None:
        raise ValueError('`concat_over` is no longer a valid argument to '
                         'xarray.concat; it has been split into the '
                         '`data_vars` and `coords` arguments')

    # Dispatch on the type of the first object; all objects are assumed to
    # be of the same kind.
    if isinstance(first_obj, DataArray):
        f = _dataarray_concat
    elif isinstance(first_obj, Dataset):
        f = _dataset_concat
    else:
        raise TypeError('can only concatenate xarray Dataset and DataArray '
                        'objects, got %s' % type(first_obj))
    return f(objs, dim, data_vars, coords, compat, positions)
def _calc_concat_dim_coord(dim):
    """Infer the dimension name and 1d coordinate variable (if appropriate)
    for concatenating along the new dimension.

    Returns a ``(dim_name, coord)`` pair; ``coord`` is None when ``dim``
    was given as a plain string.
    """
    from .dataarray import DataArray

    if isinstance(dim, str):
        # Just a name: no coordinate values were supplied.
        return dim, None

    if isinstance(dim, DataArray):
        # Use the DataArray itself as the coordinate.
        coord = dim
    elif isinstance(dim, Variable):
        # Promote the Variable to an index coordinate.
        coord = as_variable(dim).to_index_variable()
    else:
        # An arbitrary sequence of values: wrap it in an IndexVariable,
        # falling back to a default name when none is attached.
        name = getattr(dim, 'name', None)
        if name is None:
            name = 'concat_dim'
        return name, IndexVariable(name, dim)

    dim_name, = coord.dims
    return dim_name, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
    """
    Determine which dataset variables need to be concatenated in the result,
    and which can simply be taken from the first dataset.

    Parameters
    ----------
    datasets : list of Dataset
        Datasets being concatenated. Only the first one is inspected,
        except under the 'different' option, which compares all of them.
    dim : str
        Name of the dimension being concatenated along.
    data_vars, coords : {'minimal', 'different', 'all'} or list of str
        Selection options, as documented in ``concat``.

    Returns
    -------
    concat_over : set of str
        Names of variables that must be concatenated.
    equals : dict
        Maps variable name -> bool equality result, populated by the
        'different' option so the comparison is not repeated later.
    """
    # Return values
    concat_over = set()
    equals = {}

    # Any variable that spans the concat dimension must be concatenated.
    if dim in datasets[0]:
        concat_over.add(dim)
    for ds in datasets:
        concat_over.update(k for k, v in ds.variables.items()
                           if dim in v.dims)

    def process_subset_opt(opt, subset):
        # ``subset`` is 'data_vars' or 'coords'; ``opt`` is the option
        # supplied for that subset.
        if isinstance(opt, str):
            if opt == 'different':
                # all nonindexes that are not the same in each dataset
                for k in getattr(datasets[0], subset):
                    if k not in concat_over:
                        # Compare the variable of all datasets vs. the one
                        # of the first dataset. Perform the minimum amount of
                        # loads in order to avoid multiple loads from disk
                        # while keeping the RAM footprint low.
                        v_lhs = datasets[0].variables[k].load()
                        # We'll need to know later on if variables are equal.
                        computed = []
                        for ds_rhs in datasets[1:]:
                            v_rhs = ds_rhs.variables[k].compute()
                            computed.append(v_rhs)
                            if not v_lhs.equals(v_rhs):
                                concat_over.add(k)
                                equals[k] = False
                                # computed variables are not to be re-computed
                                # again in the future
                                for ds, v in zip(datasets[1:], computed):
                                    ds.variables[k].data = v.data
                                break
                        else:
                            # for/else: no mismatch found in any dataset.
                            equals[k] = True
            elif opt == 'all':
                # Everything except dimension coordinates.
                concat_over.update(set(getattr(datasets[0], subset)) -
                                   set(datasets[0].dims))
            elif opt == 'minimal':
                pass
            else:
                raise ValueError("unexpected value for %s: %s" % (subset, opt))
        else:
            # ``opt`` is an explicit list of variable names; validate it
            # against the first dataset before adding.
            invalid_vars = [k for k in opt
                            if k not in getattr(datasets[0], subset)]
            if invalid_vars:
                if subset == 'coords':
                    raise ValueError(
                        'some variables in coords are not coordinates on '
                        'the first dataset: %s' % (invalid_vars,))
                else:
                    raise ValueError(
                        'some variables in data_vars are not data variables '
                        'on the first dataset: %s' % (invalid_vars,))
            concat_over.update(opt)

    process_subset_opt(data_vars, 'data_vars')
    process_subset_opt(coords, 'coords')
    return concat_over, equals
def _dataset_concat(datasets, dim, data_vars, coords, compat, positions):
    """
    Concatenate a sequence of datasets along a new or existing dimension.

    Parameters mirror ``concat``; ``dim`` may be a name, Variable or
    DataArray. Returns the concatenated Dataset.
    """
    from .dataset import Dataset

    if compat not in ['equals', 'identical']:
        raise ValueError("compat=%r invalid: must be 'equals' "
                         "or 'identical'" % compat)

    dim, coord = _calc_concat_dim_coord(dim)
    # Make sure we're working on a copy (we'll be loading variables)
    datasets = [ds.copy() for ds in datasets]
    datasets = align(*datasets, join='outer', copy=False, exclude=[dim])

    concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)

    def insert_result_variable(k, v):
        # Record variable ``v`` under name ``k``, tracking whether it is a
        # coordinate (judged by the first dataset).
        assert isinstance(v, Variable)
        if k in datasets[0].coords:
            result_coord_names.add(k)
        result_vars[k] = v

    # create the new dataset and add constant (non-concatenated) variables
    result_vars = OrderedDict()
    result_coord_names = set(datasets[0].coords)
    result_attrs = datasets[0].attrs
    result_encoding = datasets[0].encoding
    for k, v in datasets[0].variables.items():
        if k not in concat_over:
            insert_result_variable(k, v)

    # check that global attributes and non-concatenated variables are fixed
    # across all datasets
    for ds in datasets[1:]:
        if (compat == 'identical' and
                not utils.dict_equiv(ds.attrs, result_attrs)):
            raise ValueError('dataset global attributes not equal')
        for k, v in ds.variables.items():
            if k not in result_vars and k not in concat_over:
                raise ValueError('encountered unexpected variable %r' % k)
            elif (k in result_coord_names) != (k in ds.coords):
                raise ValueError('%r is a coordinate in some datasets but not '
                                 'others' % k)
            elif k in result_vars and k != dim:
                # Don't use Variable.identical as it internally invokes
                # Variable.equals, and we may already know the answer
                if compat == 'identical' and not utils.dict_equiv(
                        v.attrs, result_vars[k].attrs):
                    raise ValueError(
                        'variable %s not identical across datasets' % k)

                # Proceed with equals()
                try:
                    # May be populated when using the "different" method
                    is_equal = equals[k]
                except KeyError:
                    result_vars[k].load()
                    is_equal = v.equals(result_vars[k])
                if not is_equal:
                    raise ValueError(
                        'variable %s not equal across datasets' % k)

    # we've already verified everything is consistent; now, calculate
    # shared dimension sizes so we can expand the necessary variables
    dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
    non_concat_dims = {}
    for ds in datasets:
        non_concat_dims.update(ds.dims)
    non_concat_dims.pop(dim, None)

    def ensure_common_dims(vars):
        # ensure each variable with the given name shares the same
        # dimensions and the same shape for all of them except along the
        # concat dimension
        common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
        if dim not in common_dims:
            common_dims = (dim,) + common_dims
        for var, dim_len in zip(vars, dim_lengths):
            if var.dims != common_dims:
                common_shape = tuple(non_concat_dims.get(d, dim_len)
                                     for d in common_dims)
                var = var.set_dims(common_dims, common_shape)
            yield var

    # stack up each variable to fill-out the dataset (in order)
    for k in datasets[0].variables:
        if k in concat_over:
            vars = ensure_common_dims([ds.variables[k] for ds in datasets])
            combined = concat_vars(vars, dim, positions)
            insert_result_variable(k, combined)

    result = Dataset(result_vars, attrs=result_attrs)
    result = result.set_coords(result_coord_names)
    result.encoding = result_encoding

    if coord is not None:
        # add concat dimension last to ensure that its in the final Dataset
        result[coord.name] = coord

    return result
def _dataarray_concat(arrays, dim, data_vars, coords, compat,
                      positions):
    """Concatenate DataArrays by converting each to a temporary Dataset.

    Under compat='identical' all arrays must share the same name; under
    other modes, later arrays are renamed to match the first so the
    temporary datasets line up. Only data_vars='all' is supported.
    """
    arrays = list(arrays)

    if data_vars != 'all':
        raise ValueError('data_vars is not a valid argument when '
                         'concatenating DataArray objects')

    datasets = []
    for n, arr in enumerate(arrays):
        if n == 0:
            # The first array's name becomes the reference name.
            name = arr.name
        elif name != arr.name:
            if compat == 'identical':
                raise ValueError('array names not identical')
            else:
                # Force the common name so the temp datasets align.
                arr = arr.rename(name)
        datasets.append(arr._to_temp_dataset())

    ds = _dataset_concat(datasets, dim, data_vars, coords, compat,
                         positions)
    result = arrays[0]._from_temp_dataset(ds, name)

    # result_name is defined elsewhere in this module; presumably it
    # derives the final name from all input arrays — confirm there.
    result.name = result_name(arrays)
    return result
def _auto_concat(datasets, dim=None, data_vars='all', coords='different'):
    """Concatenate ``datasets`` along ``dim``, inferring the dimension
    from the first two datasets when it is not given.
    """
    if len(datasets) == 1 and dim is None:
        # There is nothing more to combine, so kick out early.
        return datasets[0]

    if dim is None:
        first, second = datasets[0], datasets[1]
        # Start from every dimension of the first dataset; if the sizes
        # differ anywhere, narrow to the dimensions whose sizes disagree.
        candidates = set(first.dims)
        if first.dims != second.dims:
            differing = set(first.dims.items()) - set(second.dims.items())
            candidates = {name for name, _ in differing}
        # Still ambiguous: keep only dimensions whose coordinate values
        # actually differ between the two datasets.
        if len(candidates) > 1:
            candidates = {d for d in candidates
                          if not first[d].equals(second[d])}
        if len(candidates) > 1:
            raise ValueError('too many different dimensions to '
                             'concatenate: %s' % candidates)
        if len(candidates) == 0:
            raise ValueError('cannot infer dimension to concatenate: '
                             'supply the ``concat_dim`` argument '
                             'explicitly')
        dim, = candidates

    return concat(datasets, dim=dim, data_vars=data_vars, coords=coords)
# Sentinel default for ``concat_dim`` so that an explicit ``None``
# (meaning "do not concatenate") can be told apart from "not provided"
# (meaning "infer the dimension from the data").
_CONCAT_DIM_DEFAULT = utils.ReprObject('<inferred>')
def _infer_concat_order_from_positions(datasets, concat_dims):
    """Assign tile ids from the nesting of ``datasets`` and validate that
    ``concat_dims`` matches the nesting depth.
    """
    combined_ids = OrderedDict(_infer_tile_ids_from_nested_list(datasets, ()))

    # Every tile id has the same length (the nesting depth); peek at one.
    first_tile_id = next(iter(combined_ids))
    n_dims = len(first_tile_id)
    if concat_dims == _CONCAT_DIM_DEFAULT or concat_dims is None:
        # Replicate the sentinel/None once per nesting level.
        concat_dims = [concat_dims] * n_dims
    elif len(concat_dims) != n_dims:
        raise ValueError("concat_dims has length {} but the datasets "
                         "passed are nested in a {}-dimensional "
                         "structure".format(str(len(concat_dims)),
                                            str(n_dims)))
    return combined_ids, concat_dims
def _infer_tile_ids_from_nested_list(entry, current_pos):
"""
Given a list of lists (of lists...) of objects, returns a iterator
which returns a tuple containing the index of each object in the nested
list structure as the key, and the object. This can then be called by the
dict constructor to create a dictionary of the objects organised by their
position in the original nested list.
Recursively traverses the given structure, while keeping track of the
current position. Should work for any type of object which isn't a list.
Parameters
----------
entry : list[list[obj, obj, ...]]
List of lists of arbitrary depth, containing objects in the order
they are to be concatenated.
Returns
-------
combined_tile_ids : dict[tuple(int, ...), obj]
"""
if isinstance(entry, list):
for i, item in enumerate(entry):
for result in _infer_tile_ids_from_nested_list(item,
current_pos + (i,)):
yield result
else:
yield current_pos, entry
def _check_shape_tile_ids(combined_tile_ids):
tile_ids = combined_tile_ids.keys()
# Check all tuples are the same length
# i.e. check that all lists are nested to the same depth
nesting_depths = [len(tile_id) for tile_id in tile_ids]
if not set(nesting_depths) == {nesting_depths[0]}:
raise ValueError("The supplied objects do not form a hypercube because"
" sub-lists do not have consistent depths")
# Check all lists along one dimension are same length
for dim in range(nesting_depths[0]):
indices_along_dim = [tile_id[dim] for tile_id in tile_ids]
occurrences = Counter(indices_along_dim)
if len(set(occurrences.values())) != 1:
raise ValueError("The supplied objects do not form a hypercube "
"because sub-lists do not have consistent "
"lengths along dimension" + str(dim))
def _combine_nd(combined_ids, concat_dims, data_vars='all',
                coords='different', compat='no_conflicts'):
    """
    Concatenates and merges an N-dimensional structure of datasets.

    No checks are performed on the consistency of the datasets, concat_dims or
    tile_IDs, because it is assumed that this has already been done.

    Parameters
    ----------
    combined_ids : Dict[Tuple[int, ...], xarray.Dataset]
        Structure containing all datasets to be concatenated with "tile_IDs" as
        keys, which specify position within the desired final combined result.
    concat_dims : sequence of str
        The dimensions along which the datasets should be concatenated. Must be
        in order, and the length must match the nesting depth.

    Returns
    -------
    combined_ds : xarray.Dataset
    """
    # Each pass combines along the first remaining dimension and strips
    # the leading element off every tile id, so after len(concat_dims)
    # passes exactly one fully-combined dataset remains.
    for dim_name in concat_dims:
        combined_ids = _auto_combine_all_along_first_dim(
            combined_ids, dim=dim_name, data_vars=data_vars,
            coords=coords, compat=compat)
    return next(iter(combined_ids.values()))
def _auto_combine_all_along_first_dim(combined_ids, dim, data_vars,
                                      coords, compat):
    """Combine every run of datasets that shares the same trailing tile id,
    concatenating each run along ``dim``.
    """
    # groupby needs its input sorted by the same key it groups on.
    # TODO remove all these sorted OrderedDicts once python >= 3.6 only
    ordered = OrderedDict(sorted(combined_ids.items(), key=_new_tile_id))
    grouped = itertools.groupby(ordered.items(), key=_new_tile_id)

    new_combined_ids = {}
    for new_id, group in grouped:
        # Within a group, order datasets by their full (original) tile id.
        group_in_order = OrderedDict(sorted(group))
        new_combined_ids[new_id] = _auto_combine_1d(
            group_in_order.values(), dim, compat, data_vars, coords)
    return new_combined_ids
def vars_as_keys(ds):
    """Return the sorted variable names of ``ds`` as a hashable group key."""
    names = sorted(ds)
    return tuple(names)
def _auto_combine_1d(datasets, concat_dim=_CONCAT_DIM_DEFAULT,
                     compat='no_conflicts',
                     data_vars='all', coords='different'):
    """Combine datasets along at most one dimension, then merge.

    This is the old ``auto_combine`` logic, which only worked along 1D:
    datasets are grouped by their variable names, each group is
    concatenated, and the concatenated results are merged together.
    ``concat_dim=None`` disables concatenation entirely.
    """
    if concat_dim is not None:
        # The sentinel means "infer the dimension" (passed as dim=None).
        dim = None if concat_dim is _CONCAT_DIM_DEFAULT else concat_dim
        # groupby requires its input sorted by the grouping key.
        sorted_datasets = sorted(datasets, key=vars_as_keys)
        grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)
        # Loop variable renamed from ``id``, which shadowed the builtin.
        concatenated = [_auto_concat(list(ds_group), dim=dim,
                                     data_vars=data_vars, coords=coords)
                        for _, ds_group in grouped_by_vars]
    else:
        concatenated = datasets
    merged = merge(concatenated, compat=compat)
    return merged
def _new_tile_id(single_id_ds_pair):
tile_id, ds = single_id_ds_pair
return tile_id[1:]
def _auto_combine(datasets, concat_dims, compat, data_vars, coords,
                  infer_order_from_coords, ids):
    """
    Calls logic to decide concatenation order before concatenating.

    Parameters
    ----------
    datasets : iterable
        Datasets (possibly nested in lists) to combine.
    concat_dims : list
        One entry per nesting level; entries may be the inferred-dim
        sentinel or None.
    compat, data_vars, coords
        Passed through to the concat/merge machinery.
    infer_order_from_coords : bool
        Must be False; coordinate-based ordering is not implemented.
    ids : bool or iterable
        False when the datasets are not yet paired with tile ids;
        otherwise an iterable of tile ids matching ``datasets``.
    """
    # Arrange datasets for concatenation
    if infer_order_from_coords:
        raise NotImplementedError
        # TODO Use coordinates to determine tile_ID for each dataset in N-D
        # Ignore how they were ordered previously
        # Should look like:
        # combined_ids, concat_dims = _infer_tile_ids_from_coords(datasets,
        #                                                         concat_dims)
    else:
        # Use information from the shape of the user input
        if not ids:
            # Determine tile_IDs by structure of input in N-D
            # (i.e. ordering in list-of-lists)
            combined_ids, concat_dims = _infer_concat_order_from_positions(
                datasets, concat_dims)
        else:
            # Already sorted so just use the ids already passed
            combined_ids = OrderedDict(zip(ids, datasets))

    # Check that the inferred shape is combinable
    _check_shape_tile_ids(combined_ids)

    # Repeatedly concatenate then merge along each dimension
    combined = _combine_nd(combined_ids, concat_dims, compat=compat,
                           data_vars=data_vars, coords=coords)
    return combined
def auto_combine(datasets, concat_dim=_CONCAT_DIM_DEFAULT,
                 compat='no_conflicts', data_vars='all', coords='different'):
    """Attempt to auto-magically combine the given datasets into one.

    This method attempts to combine a list of datasets into a single entity by
    inspecting metadata and using a combination of concat and merge.
    It does not concatenate along more than one dimension or sort data under
    any circumstances. It does align coordinates, but different variables on
    datasets can cause it to fail under some scenarios. In complex cases, you
    may need to clean up your data and use ``concat``/``merge`` explicitly.
    ``auto_combine`` works well if you have N years of data and M data
    variables, and each combination of a distinct time period and set of data
    variables is saved its own dataset.

    Parameters
    ----------
    datasets : sequence of xarray.Dataset
        Dataset objects to merge.
    concat_dim : str or DataArray or Index, optional
        Dimension along which to concatenate variables, as used by
        :py:func:`xarray.concat`. You only need to provide this argument if
        the dimension along which you want to concatenate is not a dimension
        in the original datasets, e.g., if you want to stack a collection of
        2D arrays along a third dimension.
        By default, xarray attempts to infer this argument by examining
        component files. Set ``concat_dim=None`` explicitly to disable
        concatenation.
    compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
        String indicating how to compare variables of the same name for
        potential conflicts:
        - 'broadcast_equals': all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - 'equals': all values and dimensions must be the same.
        - 'identical': all values, dimensions and attributes must be the
          same.
        - 'no_conflicts': only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
    data_vars : {'minimal', 'different', 'all' or list of str}, optional
        Details are in the documentation of concat
    coords : {'minimal', 'different', 'all' or list of str}, optional
        Details are in the documentation of concat

    Returns
    -------
    combined : xarray.Dataset

    See also
    --------
    concat
    Dataset.merge
    """  # noqa
    # Coerce 1D input into ND to maintain backwards-compatible API until API
    # for N-D combine decided
    # (see https://github.com/pydata/xarray/pull/2553/#issuecomment-445892746)
    if concat_dim is None or concat_dim == _CONCAT_DIM_DEFAULT:
        concat_dims = concat_dim
    elif not isinstance(concat_dim, list):
        concat_dims = [concat_dim]
    else:
        concat_dims = concat_dim
    infer_order_from_coords = False

    # The IDs argument tells _auto_combine that the datasets are not yet sorted
    return _auto_combine(datasets, concat_dims=concat_dims, compat=compat,
                         data_vars=data_vars, coords=coords,
                         infer_order_from_coords=infer_order_from_coords,
                         ids=False)
| |
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser, User
from djblets.testing.decorators import add_fixtures
from reviewboard.reviews.models import Group
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.testing import TestCase
class PolicyTests(TestCase):
    """Access-policy tests for review groups, repositories and requests.

    Each test builds a minimal fixture (group / repository / review
    request) and asserts visibility for a registered user and for an
    anonymous visitor. Membership checks consistently use
    assertIn/assertNotIn (previously a mix of assertIn and
    assertTrue(x in ...)), and groups are consistently created through
    Group.objects.create().
    """

    fixtures = ['test_users']

    def setUp(self):
        super(PolicyTests, self).setUp()

        # The two principals every policy check below is evaluated against.
        self.user = User.objects.create(username='testuser', password='')
        self.anonymous = AnonymousUser()

    def test_group_public(self):
        """Testing access to a public review group"""
        group = Group.objects.create(name='test-group')

        self.assertFalse(group.invite_only)
        self.assertTrue(group.is_accessible_by(self.user))
        self.assertTrue(group.is_accessible_by(self.anonymous))

        self.assertIn(group, Group.objects.accessible(self.user))
        self.assertIn(group, Group.objects.accessible(self.anonymous))

    def test_group_invite_only_access_denied(self):
        """Testing no access to unjoined invite-only group"""
        group = Group.objects.create(name='test-group', invite_only=True)

        self.assertTrue(group.invite_only)
        self.assertFalse(group.is_accessible_by(self.user))
        self.assertFalse(group.is_accessible_by(self.anonymous))

        self.assertNotIn(group, Group.objects.accessible(self.user))
        self.assertNotIn(group, Group.objects.accessible(self.anonymous))

    def test_group_invite_only_access_allowed(self):
        """Testing access to joined invite-only group"""
        group = Group.objects.create(name='test-group', invite_only=True)
        group.users.add(self.user)

        self.assertTrue(group.invite_only)
        self.assertTrue(group.is_accessible_by(self.user))
        self.assertFalse(group.is_accessible_by(self.anonymous))

        self.assertIn(group, Group.objects.accessible(self.user))
        self.assertNotIn(group, Group.objects.accessible(self.anonymous))

    def test_group_public_hidden(self):
        """Testing visibility of a hidden public group"""
        group = Group.objects.create(name='test-group', visible=False)

        self.assertFalse(group.visible)
        self.assertTrue(group.is_accessible_by(self.user))
        # A hidden group stays accessible; it is only filtered out of the
        # queryset when visible_only=True is requested.
        self.assertIn(
            group, Group.objects.accessible(self.user, visible_only=False))
        self.assertNotIn(
            group, Group.objects.accessible(self.user, visible_only=True))

    def test_group_invite_only_hidden_access_denied(self):
        """Testing visibility of a hidden unjoined invite-only group"""
        group = Group.objects.create(name='test-group', visible=False,
                                     invite_only=True)

        self.assertFalse(group.visible)
        self.assertTrue(group.invite_only)
        self.assertFalse(group.is_accessible_by(self.user))
        self.assertNotIn(
            group, Group.objects.accessible(self.user, visible_only=False))
        self.assertNotIn(
            group, Group.objects.accessible(self.user, visible_only=True))

    def test_group_invite_only_hidden_access_allowed(self):
        """Testing visibility of a hidden joined invite-only group"""
        group = Group.objects.create(name='test-group', visible=False,
                                     invite_only=True)
        group.users.add(self.user)

        self.assertFalse(group.visible)
        self.assertTrue(group.invite_only)
        self.assertTrue(group.is_accessible_by(self.user))
        self.assertIn(
            group, Group.objects.accessible(self.user, visible_only=False))
        self.assertIn(
            group, Group.objects.accessible(self.user, visible_only=True))

    def test_group_invite_only_review_request_ownership(self):
        """Testing visibility of review requests assigned to invite-only
        groups by a non-member
        """
        group = Group.objects.create(name='test-group', visible=False,
                                     invite_only=True)

        review_request = self.create_review_request(publish=True,
                                                    submitter=self.user)
        review_request.target_groups.add(group)

        # The submitter can see their own review request regardless of
        # the invite-only target group.
        self.assertTrue(review_request.is_accessible_by(self.user))

    @add_fixtures(['test_scmtools'])
    def test_repository_public(self):
        """Testing access to a public repository"""
        tool = Tool.objects.get(name='CVS')
        repo = Repository.objects.create(name='Test1', path='path1', tool=tool)

        self.assertTrue(repo.public)
        self.assertTrue(repo.is_accessible_by(self.user))
        self.assertTrue(repo.is_accessible_by(self.anonymous))

    @add_fixtures(['test_scmtools'])
    def test_repository_private_access_denied(self):
        """Testing no access to a private repository"""
        tool = Tool.objects.get(name='CVS')
        repo = Repository.objects.create(name='Test1', path='path1', tool=tool,
                                         public=False)

        self.assertFalse(repo.public)
        self.assertFalse(repo.is_accessible_by(self.user))
        self.assertFalse(repo.is_accessible_by(self.anonymous))

    @add_fixtures(['test_scmtools'])
    def test_repository_private_access_allowed_by_user(self):
        """Testing access to a private repository with user added"""
        tool = Tool.objects.get(name='CVS')
        repo = Repository.objects.create(name='Test1', path='path1', tool=tool,
                                         public=False)
        repo.users.add(self.user)

        self.assertFalse(repo.public)
        self.assertTrue(repo.is_accessible_by(self.user))
        self.assertFalse(repo.is_accessible_by(self.anonymous))

    @add_fixtures(['test_scmtools'])
    def test_repository_private_access_allowed_by_review_group(self):
        """Testing access to a private repository with joined review group
        added
        """
        group = Group.objects.create(name='test-group', invite_only=True)
        group.users.add(self.user)

        tool = Tool.objects.get(name='CVS')
        repo = Repository.objects.create(name='Test1', path='path1', tool=tool,
                                         public=False)
        repo.review_groups.add(group)

        self.assertFalse(repo.public)
        self.assertTrue(repo.is_accessible_by(self.user))
        self.assertFalse(repo.is_accessible_by(self.anonymous))

    def test_review_request_public(self):
        """Testing access to a public review request"""
        review_request = self.create_review_request(publish=True)

        self.assertTrue(review_request.is_accessible_by(self.user))
        self.assertTrue(review_request.is_accessible_by(self.anonymous))

    def test_review_request_with_invite_only_group(self):
        """Testing no access to a review request with only an unjoined
        invite-only group
        """
        group = Group.objects.create(name='test-group', invite_only=True)

        review_request = self.create_review_request(publish=True)
        review_request.target_groups.add(group)

        self.assertFalse(review_request.is_accessible_by(self.user))
        self.assertFalse(review_request.is_accessible_by(self.anonymous))

    def test_review_request_with_invite_only_group_and_target_user(self):
        """Testing access to a review request with specific target user and
        invite-only group
        """
        group = Group.objects.create(name='test-group', invite_only=True)

        review_request = self.create_review_request(publish=True)
        review_request.target_groups.add(group)
        review_request.target_people.add(self.user)

        # Being a direct target grants the user access despite the
        # invite-only group; anonymous visitors remain locked out.
        self.assertTrue(review_request.is_accessible_by(self.user))
        self.assertFalse(review_request.is_accessible_by(self.anonymous))

    @add_fixtures(['test_scmtools'])
    def test_review_request_with_private_repository(self):
        """Testing no access to a review request with a private repository"""
        Group.objects.create(name='test-group', invite_only=True)

        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review_request.repository.public = False
        review_request.repository.save()

        self.assertFalse(review_request.is_accessible_by(self.user))
        self.assertFalse(review_request.is_accessible_by(self.anonymous))

    @add_fixtures(['test_scmtools'])
    def test_review_request_with_private_repository_allowed_by_user(self):
        """Testing access to a review request with a private repository with
        user added
        """
        Group.objects.create(name='test-group', invite_only=True)

        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review_request.repository.public = False
        review_request.repository.users.add(self.user)
        review_request.repository.save()

        self.assertTrue(review_request.is_accessible_by(self.user))
        self.assertFalse(review_request.is_accessible_by(self.anonymous))

    @add_fixtures(['test_scmtools'])
    def test_review_request_with_private_repository_allowed_by_review_group(
            self):
        """Testing access to a review request with a private repository with
        review group added
        """
        group = Group.objects.create(name='test-group', invite_only=True)
        group.users.add(self.user)

        review_request = self.create_review_request(create_repository=True,
                                                    publish=True)
        review_request.repository.public = False
        review_request.repository.review_groups.add(group)
        review_request.repository.save()

        self.assertTrue(review_request.is_accessible_by(self.user))
        self.assertFalse(review_request.is_accessible_by(self.anonymous))
| |
"""
Test SBTarget APIs.
"""
from __future__ import print_function
import unittest2
import os
import time
import re
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TargetAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to of function 'c'.
self.line1 = line_number(
'main.c', '// Find the line number for breakpoint 1 here.')
self.line2 = line_number(
'main.c', '// Find the line number for breakpoint 2 here.')
self.line_main = line_number(
"main.c", "// Set a break at entry to main.")
# rdar://problem/9700873
# Find global variable value fails for dwarf if inferior not started
# (Was CrashTracer: [USER] 1 crash in Python at _lldb.so: lldb_private::MemoryCache::Read + 94)
#
# It does not segfaults now. But for dwarf, the variable value is None if
# the inferior process does not exist yet. The radar has been updated.
#@unittest232.skip("segmentation fault -- skipping")
@add_test_categories(['pyapi'])
def test_find_global_variables(self):
"""Exercise SBTarget.FindGlobalVariables() API."""
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
self.find_global_variables('b.out')
@add_test_categories(['pyapi'])
def test_find_compile_units(self):
"""Exercise SBTarget.FindCompileUnits() API."""
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
self.find_compile_units(self.getBuildArtifact('b.out'))
@add_test_categories(['pyapi'])
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24778")
def test_find_functions(self):
"""Exercise SBTarget.FindFunctions() API."""
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
self.find_functions('b.out')
    @add_test_categories(['pyapi'])
    def test_get_description(self):
        """Exercise SBTarget.GetDescription() API."""
        # Delegates to the get_description() helper method, which checks
        # both the brief and the full description levels.
        self.build()
        self.get_description()
    @add_test_categories(['pyapi'])
    @expectedFailureAll(oslist=["windows"], bugnumber='llvm.org/pr21765')
    def test_resolve_symbol_context_with_address(self):
        """Exercise SBTarget.ResolveSymbolContextForAddress() API."""
        # Delegates to the resolve_symbol_context_with_address() helper
        # method.
        self.build()
        self.resolve_symbol_context_with_address()
@add_test_categories(['pyapi'])
def test_get_platform(self):
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
target = self.create_simple_target('b.out')
platform = target.platform
self.assertTrue(platform, VALID_PLATFORM)
@add_test_categories(['pyapi'])
def test_get_data_byte_size(self):
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
target = self.create_simple_target('b.out')
self.assertEqual(target.data_byte_size, 1)
@add_test_categories(['pyapi'])
def test_get_code_byte_size(self):
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
target = self.create_simple_target('b.out')
self.assertEqual(target.code_byte_size, 1)
@add_test_categories(['pyapi'])
def test_resolve_file_address(self):
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
target = self.create_simple_target('b.out')
# find the file address in the .data section of the main
# module
data_section = self.find_data_section(target)
data_section_addr = data_section.file_addr
# resolve the above address, and compare the address produced
# by the resolution against the original address/section
res_file_addr = target.ResolveFileAddress(data_section_addr)
self.assertTrue(res_file_addr.IsValid())
self.assertEqual(data_section_addr, res_file_addr.file_addr)
data_section2 = res_file_addr.section
self.assertIsNotNone(data_section2)
self.assertEqual(data_section.name, data_section2.name)
@add_test_categories(['pyapi'])
def test_read_memory(self):
d = {'EXE': 'b.out'}
self.build(dictionary=d)
self.setTearDownCleanup(dictionary=d)
target = self.create_simple_target('b.out')
breakpoint = target.BreakpointCreateByLocation(
"main.c", self.line_main)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Put debugger into synchronous mode so when we target.LaunchSimple returns
# it will guaranteed to be at the breakpoint
self.dbg.SetAsync(False)
# Launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
# find the file address in the .data section of the main
# module
data_section = self.find_data_section(target)
sb_addr = lldb.SBAddress(data_section, 0)
error = lldb.SBError()
content = target.ReadMemory(sb_addr, 1, error)
self.assertTrue(error.Success(), "Make sure memory read succeeded")
self.assertEqual(len(content), 1)
def create_simple_target(self, fn):
exe = self.getBuildArtifact(fn)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
return target
def find_data_section(self, target):
mod = target.GetModuleAtIndex(0)
data_section = None
for s in mod.sections:
sect_type = s.GetSectionType()
if sect_type == lldb.eSectionTypeData:
data_section = s
break
elif sect_type == lldb.eSectionTypeContainer:
for i in range(s.GetNumSubSections()):
ss = s.GetSubSectionAtIndex(i)
sect_type = ss.GetSectionType()
if sect_type == lldb.eSectionTypeData:
data_section = ss
break
self.assertIsNotNone(data_section)
return data_section
    def find_global_variables(self, exe_name):
        """Exercise SBTaget.FindGlobalVariables() API.

        Builds a target for ``exe_name``, runs to a breakpoint, then
        looks up 'my_global_var_of_char_type' through both
        SBTarget.FindGlobalVariables() and SBModule.FindGlobalVariables()
        and checks its name, type and value.
        """
        exe = self.getBuildArtifact(exe_name)

        # Create a target by the debugger.
        target = self.dbg.CreateTarget(exe)
        self.assertTrue(target, VALID_TARGET)

        # rdar://problem/9700873
        # Find global variable value fails for dwarf if inferior not started
        # (Was CrashTracer: [USER] 1 crash in Python at _lldb.so: lldb_private::MemoryCache::Read + 94)
        #
        # Remove the lines to create a breakpoint and to start the inferior
        # which are workarounds for the dwarf case.

        breakpoint = target.BreakpointCreateByLocation('main.c', self.line1)
        self.assertTrue(breakpoint, VALID_BREAKPOINT)

        # Now launch the process, and do not stop at entry point.
        process = target.LaunchSimple(
            None, None, self.get_process_working_directory())
        self.assertTrue(process, PROCESS_IS_VALID)

        # Make sure we hit our breakpoint:
        thread_list = lldbutil.get_threads_stopped_at_breakpoint(
            process, breakpoint)
        self.assertTrue(len(thread_list) == 1)

        # Query the global by name; the second argument (3) is presumably
        # the maximum number of matches — confirm against the SBTarget
        # API documentation.
        value_list = target.FindGlobalVariables(
            'my_global_var_of_char_type', 3)
        self.assertTrue(value_list.GetSize() == 1)
        my_global_var = value_list.GetValueAtIndex(0)
        self.DebugSBValue(my_global_var)
        self.assertTrue(my_global_var)
        self.expect(my_global_var.GetName(), exe=False,
                    startstr="my_global_var_of_char_type")
        self.expect(my_global_var.GetTypeName(), exe=False,
                    startstr="char")
        self.expect(my_global_var.GetValue(), exe=False,
                    startstr="'X'")

        # While we are at it, let's also exercise the similar
        # SBModule.FindGlobalVariables() API.
        for m in target.module_iter():
            if os.path.normpath(m.GetFileSpec().GetDirectory()) == self.getBuildDir() and m.GetFileSpec().GetFilename() == exe_name:
                value_list = m.FindGlobalVariables(
                    target, 'my_global_var_of_char_type', 3)
                self.assertTrue(value_list.GetSize() == 1)
                self.assertTrue(
                    value_list.GetValueAtIndex(0).GetValue() == "'X'")
                break
def find_compile_units(self, exe):
    """Exercise SBTarget.FindCompileUnits() API.

    Looks up the compile unit(s) for 'main.c' in the given executable and
    checks the first match by filename.
    """
    source_name = "main.c"

    # Create a target by the debugger.
    target = self.dbg.CreateTarget(exe)
    self.assertTrue(target, VALID_TARGET)

    # Renamed from `list` so the builtin is not shadowed.
    sc_list = target.FindCompileUnits(lldb.SBFileSpec(source_name, False))

    # Executable has been built just from one source file 'main.c',
    # so we may check only the first element of the result list.
    self.assertTrue(
        sc_list[0].GetCompileUnit().GetFileSpec().GetFilename() == source_name)
def find_functions(self, exe_name):
    """Exercise SBTarget.FindFunctions() API.

    Finds function 'c' by name and verifies both its owning module and
    its symbol name.
    """
    exe = self.getBuildArtifact(exe_name)

    # Create a target by the debugger.
    target = self.dbg.CreateTarget(exe)
    self.assertTrue(target, VALID_TARGET)

    # Renamed from `list` so the builtin is not shadowed.
    sc_list = target.FindFunctions('c', lldb.eFunctionNameTypeAuto)
    self.assertTrue(sc_list.GetSize() == 1)

    for sc in sc_list:
        self.assertTrue(
            sc.GetModule().GetFileSpec().GetFilename() == exe_name)
        self.assertTrue(sc.GetSymbol().GetName() == 'c')
def get_description(self):
    """Exercise SBTarget.GetDescription() API.

    Checks both the default (brief) description and the full description
    of a freshly created target.
    """
    exe = self.getBuildArtifact("a.out")

    # Create a target by the debugger.
    target = self.dbg.CreateTarget(exe)
    self.assertTrue(target, VALID_TARGET)

    from lldbsuite.test.lldbutil import get_description

    # get_description() allows no option to mean
    # lldb.eDescriptionLevelBrief.
    desc = get_description(target)
    #desc = get_description(target, option=lldb.eDescriptionLevelBrief)
    if not desc:
        self.fail("SBTarget.GetDescription() failed")
    # Brief output mentions the executable but not the verbose headers.
    self.expect(desc, exe=False,
                substrs=['a.out'])
    self.expect(desc, exe=False, matching=False,
                substrs=['Target', 'Module', 'Breakpoint'])

    # Full output includes the verbose headers as well.
    desc = get_description(target, option=lldb.eDescriptionLevelFull)
    if not desc:
        self.fail("SBTarget.GetDescription() failed")
    self.expect(desc, exe=False,
                substrs=['a.out', 'Target', 'Module', 'Breakpoint'])
@not_remote_testsuite_ready
@add_test_categories(['pyapi'])
@no_debug_info_test
def test_launch_new_process_and_redirect_stdout(self):
    """Exercise SBTaget.Launch() API with redirected stdout."""
    self.build()
    exe = self.getBuildArtifact("a.out")

    # Create a target by the debugger.
    target = self.dbg.CreateTarget(exe)
    self.assertTrue(target, VALID_TARGET)

    # Add an extra twist of stopping the inferior in a breakpoint, and then continue till it's done.
    # We should still see the entire stdout redirected once the process is
    # finished.
    line = line_number('main.c', '// a(3) -> c(3)')
    breakpoint = target.BreakpointCreateByLocation('main.c', line)

    # Now launch the process, do not stop at entry point, and redirect stdout to "stdout.txt" file.
    # The inferior should run to completion after "process.Continue()"
    # call.
    local_path = self.getBuildArtifact("stdout.txt")
    # Start from a clean slate so the existence check below is meaningful.
    if os.path.exists(local_path):
        os.remove(local_path)

    # On a remote platform the redirect target must live in the remote
    # process working directory; it is copied back to the host afterwards.
    if lldb.remote_platform:
        stdout_path = lldbutil.append_to_process_working_directory(self,
                                                                   "lldb-stdout-redirect.txt")
    else:
        stdout_path = local_path
    error = lldb.SBError()
    process = target.Launch(
        self.dbg.GetListener(),
        None,
        None,
        None,
        stdout_path,
        None,
        None,
        0,
        False,
        error)
    process.Continue()
    #self.runCmd("process status")

    if lldb.remote_platform:
        # copy output file to host
        lldb.remote_platform.Get(
            lldb.SBFileSpec(stdout_path),
            lldb.SBFileSpec(local_path))

    # The 'stdout.txt' file should now exist.
    self.assertTrue(
        os.path.isfile(local_path),
        "'stdout.txt' exists due to redirected stdout via SBTarget.Launch() API.")

    # Read the output file produced by running the program.
    with open(local_path, 'r') as f:
        output = f.read()
    self.expect(output, exe=False,
                substrs=["a(1)", "b(2)", "a(3)"])
def resolve_symbol_context_with_address(self):
    """Exercise SBTaget.ResolveSymbolContextForAddress() API.

    Stops at two breakpoints (self.line1 and self.line2), resolves a
    symbol context for each stop address, and verifies both resolve to
    the same symbol.
    """
    exe = self.getBuildArtifact("a.out")

    # Create a target by the debugger.
    target = self.dbg.CreateTarget(exe)
    self.assertTrue(target, VALID_TARGET)

    # Now create the two breakpoints inside function 'a'.
    breakpoint1 = target.BreakpointCreateByLocation('main.c', self.line1)
    breakpoint2 = target.BreakpointCreateByLocation('main.c', self.line2)
    #print("breakpoint1:", breakpoint1)
    #print("breakpoint2:", breakpoint2)
    self.assertTrue(breakpoint1 and
                    breakpoint1.GetNumLocations() == 1,
                    VALID_BREAKPOINT)
    self.assertTrue(breakpoint2 and
                    breakpoint2.GetNumLocations() == 1,
                    VALID_BREAKPOINT)

    # Now launch the process, and do not stop at entry point.
    process = target.LaunchSimple(
        None, None, self.get_process_working_directory())
    self.assertTrue(process, PROCESS_IS_VALID)

    # Frame #0 should be on self.line1.
    self.assertTrue(process.GetState() == lldb.eStateStopped)
    thread = lldbutil.get_stopped_thread(
        process, lldb.eStopReasonBreakpoint)
    self.assertTrue(
        thread.IsValid(),
        "There should be a thread stopped due to breakpoint condition")
    #self.runCmd("process status")
    frame0 = thread.GetFrameAtIndex(0)
    lineEntry = frame0.GetLineEntry()
    self.assertTrue(lineEntry.GetLine() == self.line1)
    # Remember the first stop address for the resolution check below.
    address1 = lineEntry.GetStartAddress()

    # Continue the inferior, the breakpoint 2 should be hit.
    process.Continue()
    self.assertTrue(process.GetState() == lldb.eStateStopped)
    thread = lldbutil.get_stopped_thread(
        process, lldb.eStopReasonBreakpoint)
    self.assertTrue(
        thread.IsValid(),
        "There should be a thread stopped due to breakpoint condition")
    #self.runCmd("process status")
    frame0 = thread.GetFrameAtIndex(0)
    lineEntry = frame0.GetLineEntry()
    self.assertTrue(lineEntry.GetLine() == self.line2)
    address2 = lineEntry.GetStartAddress()
    #print("address1:", address1)
    #print("address2:", address2)

    # Now call SBTarget.ResolveSymbolContextForAddress() with the addresses
    # from our line entry.
    context1 = target.ResolveSymbolContextForAddress(
        address1, lldb.eSymbolContextEverything)
    context2 = target.ResolveSymbolContextForAddress(
        address2, lldb.eSymbolContextEverything)

    self.assertTrue(context1 and context2)
    #print("context1:", context1)
    #print("context2:", context2)

    # Verify that the context point to the same function 'a'.
    symbol1 = context1.GetSymbol()
    symbol2 = context2.GetSymbol()
    self.assertTrue(symbol1 and symbol2)
    #print("symbol1:", symbol1)
    #print("symbol2:", symbol2)

    from lldbsuite.test.lldbutil import get_description
    desc1 = get_description(symbol1)
    desc2 = get_description(symbol2)
    self.assertTrue(desc1 and desc2 and desc1 == desc2,
                    "The two addresses should resolve to the same symbol")
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 23:43:30 2015
@author: ajaver
"""
#import os
import sys
import os
import time
import subprocess as sp
from functools import partial
from io import StringIO
from tierpsy.helper.misc import TimeCounter, ReadEnqueue
GUI_CLEAR_SIGNAL = '+++++++++++++++++++++++++++++++++++++++++++++++++'
class CapturingOutput(list):
    """Context manager that collects everything printed to stdout.

    While the `with` block runs, sys.stdout is swapped for an in-memory
    buffer; on exit the captured text is split into newline-terminated
    lines and appended to this list, and the real stdout is restored.

    Adapted from http://stackoverflow.com/questions/1218933/can-i-redirect-the-stdout-in-python-into-some-sort-of-string-buffer
    """

    def __enter__(self):
        # Keep the real stream so __exit__ can restore it.
        self._stdout = sys.stdout
        self._stringio = StringIO()
        sys.stdout = self._stringio
        return self

    def __exit__(self, *args):
        captured = self._stringio.getvalue().splitlines()
        self.extend(line + '\n' for line in captured)
        sys.stdout = self._stdout
ON_POSIX = 'posix' in sys.builtin_module_names
class StartProcess():
    """Wrap a single worker subprocess together with its buffered output.

    `output` always keeps at least one line; `read_buff()` trims it to the
    most recent line so a GUI can display one status line per task.
    """

    def __init__(self, cmd, local_obj='', is_debug = True):
        # is_debug: when True, close() appends the full stderr text on failure.
        self.is_debug = is_debug
        self.output = ['Started\n']
        if local_obj:
            # A local wrapper object prepares the real command (its start()
            # returns the final argv); its printed progress is captured.
            with CapturingOutput() as output:
                if cmd[0] == sys.executable:
                    # The wrapper presumably re-adds the interpreter itself.
                    cmd = cmd[1:]
                self.obj_cmd = local_obj(cmd)
                self.cmd = self.obj_cmd.start()
            self.output += output
        else:
            self.obj_cmd = ''
            self.cmd = cmd
            self.output = ['Started\n']
        self.output += [cmdlist2str(self.cmd) + '\n']

        # NOTE(review): bufsize=1 means line-buffered only for text mode;
        # these pipes are binary — confirm intended.
        self.proc = sp.Popen(self.cmd, stdout=sp.PIPE, stderr=sp.PIPE,
                             bufsize=1, close_fds=ON_POSIX)
        # Non-blocking reader over the child's stdout.
        self.buf_reader = ReadEnqueue(self.proc .stdout)

    def read_buff(self):
        """Drain any newly printed lines; keep only the last one."""
        while True:
            # read line without blocking
            line = self.buf_reader.read()
            if line is not None:
                self.output.append(line)
            else:
                break
        # store only the last line
        self.output = self.output[-1:]

    def close(self):
        """Report errors, run the wrapper cleanup on success, release pipes."""
        if self.proc.poll() != 0:
            # Non-zero (or still-running None) exit code: surface stderr.
            error_outputs = self.proc.stderr.read().decode("utf-8")
            # print errors details if there was any
            self.output[-1] += 'ERROR: \n'
            # I want to add only the last line of the error. No traceback info
            # in order to do not overwhelm the user.
            dd = error_outputs.split('\n')
            if len(dd) > 1:
                self.output[-1] += dd[-2] + '\n'
            if self.is_debug:
                self.output[-1] += error_outputs
                self.output[-1] += cmdlist2str(self.cmd) + '\n'
            self.proc.stderr.flush()

        # Let the wrapper copy/clean result files only after a clean exit.
        if self.obj_cmd and self.proc.poll() == 0:
            with CapturingOutput() as output:
                self.obj_cmd.clean()
            self.output += output

        self.proc.wait()
        self.proc.stdout.close()
        self.proc.stderr.close()
def RunMultiCMD(cmd_list,
                local_obj='',
                max_num_process=3,
                refresh_time=10,
                is_debug = True):
    '''Start different process using the command is cmd_list.

    Keeps at most max_num_process workers alive, polls them every
    refresh_time seconds, redraws the console with each task's last output
    line, and starts new tasks as old ones finish. Blocks until every
    command has run.
    '''
    start_obj = partial(StartProcess, local_obj=local_obj, is_debug=is_debug)

    total_timer = TimeCounter()  # timer to measure the total time
    # since I am using pop to get the next element I need to invert the
    # list to keep the original order
    cmd_list = cmd_list[::-1]
    tot_tasks = len(cmd_list)
    if tot_tasks < max_num_process:
        max_num_process = tot_tasks

    # initialize the first max_num_process tasks in the list
    finished_tasks = []
    current_tasks = []
    for ii in range(max_num_process):
        cmd = cmd_list.pop()
        current_tasks.append(start_obj(cmd))

    # keep looping as long as there are tasks in the list
    while current_tasks:
        time.sleep(refresh_time)

        # redraw the console / GUI panel from scratch
        print(GUI_CLEAR_SIGNAL)
        os.system(['clear', 'cls'][os.name == 'nt'])

        # print info of the finished tasks
        for task_finish_msg in finished_tasks:
            sys.stdout.write(task_finish_msg)

        # loop along the process list to update output and see if there is any
        # task finished
        next_tasks = []
        # I want to close the tasks after starting the next tasks. It has the
        # disadvantage of requiring more disk space (required files for the
        # new task + the finished files) but at least it should start a new
        # task while it is copying the old results.
        tasks_to_close = []
        for task in current_tasks:
            task.read_buff()
            if task.proc.poll() is None:
                # add task to the new list if it hasn't completed
                next_tasks.append(task)
                sys.stdout.write(task.output[-1])
            else:
                # close the task and add its last output to the finished_tasks
                # list
                tasks_to_close.append(task)
                # add new task once the previous one was finished
                if cmd_list and len(next_tasks) < max_num_process:
                    cmd = cmd_list.pop()
                    next_tasks.append(start_obj(cmd))

        # if there is still space add a new task.
        while cmd_list and len(next_tasks) < max_num_process:
            cmd = cmd_list.pop()
            next_tasks.append(start_obj(cmd))

        # close tasks (copy finished files to final destination)
        for task in tasks_to_close:
            task.close()
            sys.stdout.write(task.output[-1])
            finished_tasks.append(task.output[-1])

        # start the new loop
        current_tasks = next_tasks

        # display progress
        n_finished = len(finished_tasks)
        n_remaining = len(current_tasks) + len(cmd_list)
        progress_str = 'Tasks: {} finished, {} remaining. Total_time {}.'.format(
            n_finished, n_remaining, total_timer.get_time_str())
        print('*************************************************')
        print(progress_str)
        print('*************************************************')

    # if I don't add this the GUI could terminate before displaying the last text.
    sys.stdout.flush()
    time.sleep(1)
def cmdlist2str(cmdlist):
    """Convert a Popen-style argument list into a single shell-ready string.

    Tokens starting with '-' (flags) are left bare; every other token is
    quoted: single quotes on POSIX, double quotes on Windows, where a
    trailing path separator is stripped first so it cannot escape the
    closing quote in cmd.exe.

    Returns '' for an empty list (the previous implementation raised
    UnboundLocalError in that case).
    """
    quoted = []
    for token in cmdlist:
        if not token.startswith('-'):
            if os.name != 'nt':
                token = "'" + token + "'"
            else:
                if token.endswith(os.sep):
                    token = token[:-1]
                token = '"' + token + '"'
        quoted.append(token)
    return ' '.join(quoted)
def print_cmd_list(cmd_list_compress):
    """Print each pending command as one shell-style line; no-op when empty."""
    if not cmd_list_compress:
        return
    for one_cmd in cmd_list_compress:
        print(cmdlist2str(one_cmd))
| |
import ujson
import datetime as dt
import random as rnd
from Globals import *
class NewJSONPackGenerator:
    """Generate fake device "hello" and "data" JSON packets from templates.

    The hello packet describes a device with four controls (toggle,
    switch_state, dimmer, num_value) plus an embedded `changes_packet`
    mirroring the generated control ids/states; the three data packets
    carry fresh random states for the same controls. (A fifth `sym_value`
    control existed in an earlier revision but was disabled.)
    """

    # Default template locations (developer-machine paths, kept for backward
    # compatibility). Set self.pathToHelloPackTemplate /
    # self.pathToDataPackTemplate to override.
    HELLO_PACK_TEMPLATE = ("D:\\Projects\\JetBrains\\PyCharm_Workspace\\Diploma"
                           "\\WebServer\\Template_Packets\\DeviceHelloPacket.json")
    DATA_PACK_TEMPLATE = ("D:\\Projects\\JetBrains\\PyCharm_Workspace\\Diploma"
                          "\\WebServer\\Template_Packets\\DataPacket.json")

    def __init__(self):
        # Optional template-path overrides; empty string means "use default".
        self.pathToHelloPackTemplate = ""
        self.pathToDataPackTemplate = ""

    def dateGenerate(self):
        """Return a random naive datetime between 1970-01-02 and now."""
        from faker import Faker
        fake = Faker()
        start_date = dt.datetime(1970, 1, 2)
        end_date = dt.datetime.now(tz=None)
        return fake.date_time_between_dates(datetime_start=start_date,
                                            datetime_end=end_date,
                                            tzinfo=None)

    @staticmethod
    def _load_template(path):
        """Parse one JSON template file into a fresh dict."""
        with open(path) as fid:
            return ujson.load(fid)

    @staticmethod
    def _random_control_name(fake):
        """Concatenate three random words into a control name."""
        return ''.join(fake.words(nb=3))

    def _fill_data_pack(self, pack, dev_id, ctrl_ids):
        """Populate one data packet with fresh random control states."""
        toggle_id, switch_id, dimmer_id, num_id = ctrl_ids
        pack["dev_id"] = dev_id
        pack["time_stamp"] = self.dateGenerate()
        pack["controls"][0]["ctrl_id"] = toggle_id
        pack["controls"][0]["state"] = rnd.choice(["true", "false"])
        pack["controls"][1]["ctrl_id"] = switch_id
        # state is the index into the switch's names list, not the name
        pack["controls"][1]["state"] = rnd.randint(0, 2)
        pack["controls"][2]["ctrl_id"] = dimmer_id
        pack["controls"][2]["state"] = rnd.randint(0, 100)
        pack["controls"][3]["ctrl_id"] = num_id
        pack["controls"][3]["state"] = rnd.randint(0, 8196)
        return pack

    def packsGeneration(self):
        """Build and return (hello_pack, data_pack, data_pack_2, data_pack_3)."""
        from faker import Faker
        fake = Faker()

        hello_path = self.pathToHelloPackTemplate or self.HELLO_PACK_TEMPLATE
        data_path = self.pathToDataPackTemplate or self.DATA_PACK_TEMPLATE
        t_hello_pack = self._load_template(hello_path)
        # Three independent copies of the data template.
        data_packs = [self._load_template(data_path) for _ in range(3)]

        # Device identity.
        new_dev_id = fake.mac_address()
        t_hello_pack["dev_id"] = new_dev_id
        t_hello_pack["label"] = rnd.choice(DEVICES_TYPES)
        t_hello_pack["time_stamp"] = self.dateGenerate()

        # One random ctrl_id per control: toggle, switch_state, dimmer,
        # num_value.
        ctrl_ids = [rnd.randint(0, 1024) for _ in range(4)]

        # toggle
        t_hello_pack["controls"][0]["name"] = self._random_control_name(fake)
        t_hello_pack["controls"][0]["ctrl_id"] = ctrl_ids[0]
        toggle_state = rnd.choice(["true", "false"])
        t_hello_pack["controls"][0]["type"] = {"name": "toggle", "optional": {}}

        # switch_state
        states = ["state_1", "state_2", "state_3"]
        t_hello_pack["controls"][1]["name"] = self._random_control_name(fake)
        t_hello_pack["controls"][1]["ctrl_id"] = ctrl_ids[1]
        switch_state = rnd.randint(0, 2)
        t_hello_pack["controls"][1]["type"] = {"name": "switch_state",
                                               "optional": {"names": states}}

        # dimmer
        t_hello_pack["controls"][2]["name"] = self._random_control_name(fake)
        t_hello_pack["controls"][2]["ctrl_id"] = ctrl_ids[2]
        dimmer_state = rnd.randint(0, 100)
        t_hello_pack["controls"][2]["type"] = {"name": "dimmer", "optional": {}}

        # num_value: random [min, max] range plus a state inside it
        t_hello_pack["controls"][3]["name"] = self._random_control_name(fake)
        t_hello_pack["controls"][3]["ctrl_id"] = ctrl_ids[3]
        measure_unit = rnd.choice(MEASURE_UNITS)
        low, high = rnd.randint(0, 100), rnd.randint(0, 100)
        if low > high:
            low, high = high, low
        t_hello_pack["controls"][3]["type"] = {
            "name": "num_value",
            "optional": {"max": high, "min": low, "units": measure_unit}}
        numvalue_state = rnd.randint(low, high)

        # changes_packet mirrors the generated ids and initial states
        changes = t_hello_pack["changes_packet"]
        changes["time_stamp"] = self.dateGenerate()
        changes["dev_id"] = new_dev_id
        initial_states = [toggle_state, switch_state, dimmer_state,
                          numvalue_state]
        for idx, (cid, state) in enumerate(zip(ctrl_ids, initial_states)):
            changes["controls"][idx]["ctrl_id"] = cid
            changes["controls"][idx]["state"] = state

        # Each data pack gets its own random states.
        for pack in data_packs:
            self._fill_data_pack(pack, new_dev_id, ctrl_ids)

        return (t_hello_pack, data_packs[0], data_packs[1], data_packs[2])
| |
"""
Lights on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/light.zha/
"""
from datetime import timedelta
import logging
from homeassistant.components import light
from homeassistant.const import STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from .const import (
DATA_ZHA, DATA_ZHA_DISPATCHERS, ZHA_DISCOVERY_NEW, COLOR_CHANNEL,
ON_OFF_CHANNEL, LEVEL_CHANNEL, SIGNAL_ATTR_UPDATED, SIGNAL_SET_LEVEL
)
from .entity import ZhaEntity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['zha']
DEFAULT_DURATION = 5
CAPABILITIES_COLOR_XY = 0x08
CAPABILITIES_COLOR_TEMP = 0x10
UNSUPPORTED_ATTRIBUTE = 0x86
SCAN_INTERVAL = timedelta(minutes=60)
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Old way of setting up Zigbee Home Automation lights.

    Kept only to satisfy the platform interface; setup happens through
    config entries (async_setup_entry).
    """
    return None
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Zigbee Home Automation light from config entry."""
    async def async_discover(discovery_info):
        # Callback for lights discovered after this platform has loaded.
        await _async_setup_entities(hass, config_entry, async_add_entities,
                                    [discovery_info])

    # Subscribe to new-light discovery signals; the unsubscribe handle is
    # stored so ZHA can tear the dispatcher down later.
    unsub = async_dispatcher_connect(
        hass, ZHA_DISCOVERY_NEW.format(light.DOMAIN), async_discover)
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)

    # Lights discovered before this platform loaded are queued in hass.data;
    # add them now and drop the queue.
    lights = hass.data.get(DATA_ZHA, {}).get(light.DOMAIN)
    if lights is not None:
        await _async_setup_entities(hass, config_entry, async_add_entities,
                                    lights.values())
        del hass.data[DATA_ZHA][light.DOMAIN]
async def _async_setup_entities(hass, config_entry, async_add_entities,
                                discovery_infos):
    """Set up the ZHA lights from a sequence of discovery payloads."""
    async_add_entities(
        [Light(**discovery_info) for discovery_info in discovery_infos],
        update_before_add=True)
class Light(ZhaEntity, light.Light):
    """Representation of a ZHA or ZLL light."""

    _domain = light.DOMAIN

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Initialize the ZHA light."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._supported_features = 0
        self._color_temp = None
        self._hs_color = None
        self._brightness = None
        # Cluster channels discovered for this device; any may be absent.
        self._on_off_channel = self.cluster_channels.get(ON_OFF_CHANNEL)
        self._level_channel = self.cluster_channels.get(LEVEL_CHANNEL)
        self._color_channel = self.cluster_channels.get(COLOR_CHANNEL)
        if self._level_channel:
            self._supported_features |= light.SUPPORT_BRIGHTNESS
            self._supported_features |= light.SUPPORT_TRANSITION
            self._brightness = 0
        if self._color_channel:
            # Bitmask; tested against CAPABILITIES_COLOR_* constants above.
            color_capabilities = self._color_channel.get_color_capabilities()
            if color_capabilities & CAPABILITIES_COLOR_TEMP:
                self._supported_features |= light.SUPPORT_COLOR_TEMP
            if color_capabilities & CAPABILITIES_COLOR_XY:
                self._supported_features |= light.SUPPORT_COLOR
                self._hs_color = (0, 0)

    @property
    def should_poll(self) -> bool:
        """Poll state from device."""
        return True

    @property
    def is_on(self) -> bool:
        """Return true if entity is on."""
        # Unknown state (None) is reported as off.
        if self._state is None:
            return False
        return self._state

    @property
    def brightness(self):
        """Return the brightness of this light."""
        return self._brightness

    @property
    def device_state_attributes(self):
        """Return state attributes."""
        return self.state_attributes

    def set_level(self, value):
        """Set the brightness of this light between 0..254.

        brightness level 255 is a special value instructing the device to come
        on at `on_level` Zigbee attribute value, regardless of the last set
        level
        """
        # Clamp into the valid 0..254 range before storing.
        value = max(0, min(254, value))
        self._brightness = value
        self.async_schedule_update_ha_state()

    @property
    def hs_color(self):
        """Return the hs color value [int, int]."""
        return self._hs_color

    @property
    def color_temp(self):
        """Return the CT color value in mireds."""
        return self._color_temp

    @property
    def supported_features(self):
        """Flag supported features."""
        return self._supported_features

    def async_set_state(self, state):
        """Set the state."""
        self._state = bool(state)
        self.async_schedule_update_ha_state()

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        # React to on/off attribute reports, and (when dimmable) to
        # level-change signals from the level channel.
        await self.async_accept_signal(
            self._on_off_channel, SIGNAL_ATTR_UPDATED, self.async_set_state)
        if self._level_channel:
            await self.async_accept_signal(
                self._level_channel, SIGNAL_SET_LEVEL, self.set_level)

    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state."""
        self._state = last_state.state == STATE_ON
        if 'brightness' in last_state.attributes:
            self._brightness = last_state.attributes['brightness']
        if 'color_temp' in last_state.attributes:
            self._color_temp = last_state.attributes['color_temp']
        if 'hs_color' in last_state.attributes:
            self._hs_color = last_state.attributes['hs_color']

    async def async_turn_on(self, **kwargs):
        """Turn the entity on."""
        transition = kwargs.get(light.ATTR_TRANSITION)
        # NOTE(review): the *10 scaling suggests the channel expects the
        # transition in tenths of a second — confirm against the channel API.
        duration = transition * 10 if transition else DEFAULT_DURATION
        brightness = kwargs.get(light.ATTR_BRIGHTNESS)
        if (brightness is not None or transition) and \
                self._supported_features & light.SUPPORT_BRIGHTNESS:
            if brightness is not None:
                level = min(254, brightness)
            else:
                # No explicit brightness: reuse the last level, or full on.
                level = self._brightness or 254
            success = await self._level_channel.move_to_level_with_on_off(
                level,
                duration
            )
            if not success:
                return
            self._state = bool(level)
            if level:
                self._brightness = level
        if brightness is None or brightness:
            # Plain on() unless brightness 0 was explicitly requested.
            success = await self._on_off_channel.on()
            if not success:
                return
            self._state = True
        if light.ATTR_COLOR_TEMP in kwargs and \
                self.supported_features & light.SUPPORT_COLOR_TEMP:
            temperature = kwargs[light.ATTR_COLOR_TEMP]
            success = await self._color_channel.move_to_color_temp(
                temperature, duration)
            if not success:
                return
            self._color_temp = temperature
        if light.ATTR_HS_COLOR in kwargs and \
                self.supported_features & light.SUPPORT_COLOR:
            hs_color = kwargs[light.ATTR_HS_COLOR]
            # The color channel takes x/y scaled to 0..65535.
            xy_color = color_util.color_hs_to_xy(*hs_color)
            success = await self._color_channel.move_to_color(
                int(xy_color[0] * 65535),
                int(xy_color[1] * 65535),
                duration,
            )
            if not success:
                return
            self._hs_color = hs_color
        self.async_schedule_update_ha_state()

    async def async_turn_off(self, **kwargs):
        """Turn the entity off."""
        duration = kwargs.get(light.ATTR_TRANSITION)
        supports_level = self.supported_features & light.SUPPORT_BRIGHTNESS
        if duration and supports_level:
            # Fade to level 0 over the requested transition time.
            success = await self._level_channel.move_to_level_with_on_off(
                0,
                duration * 10
            )
        else:
            success = await self._on_off_channel.off()
        _LOGGER.debug("%s was turned off: %s", self.entity_id, success)
        if not success:
            return
        self._state = False
        self.async_schedule_update_ha_state()

    async def async_update(self):
        """Attempt to retrieve on off state from the light."""
        await super().async_update()
        if self._on_off_channel:
            self._state = await self._on_off_channel.get_attribute_value(
                'on_off')
        if self._level_channel:
            self._brightness = await self._level_channel.get_attribute_value(
                'current_level')
| |
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class IncomeReportStreamsMerchant(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
        and the value is attribute type.
      attribute_map (dict): The key is attribute name
        and the value is json key in definition.
    """

    swagger_types = {
        'name': 'str',
        'email': 'str',
        'phone': 'str',
        'address': 'str',
        'website': 'str'
    }

    attribute_map = {
        'name': 'name',
        'email': 'email',
        'phone': 'phone',
        'address': 'address',
        'website': 'website'
    }

    def _required_str(field):
        """Build a property for a required string field.

        The getter reads ``self._<field>``; the setter rejects ``None``
        with the same ValueError the generated accessors raised. Deleted
        from the class namespace after use.
        """
        slot = '_' + field

        def fget(self):
            return getattr(self, slot)

        def fset(self, value):
            if value is None:
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % field)  # noqa: E501
            setattr(self, slot, value)

        doc = "The %s of this IncomeReportStreamsMerchant." % field
        return property(fget, fset, doc=doc)

    name = _required_str('name')
    email = _required_str('email')
    phone = _required_str('phone')
    address = _required_str('address')
    website = _required_str('website')
    del _required_str

    def __init__(self, name=None, email=None, phone=None, address=None, website=None):  # noqa: E501
        """IncomeReportStreamsMerchant - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._email = None
        self._phone = None
        self._address = None
        self._website = None
        self.discriminator = None
        # Assign through the validating properties; every field is required,
        # so omitting one raises ValueError.
        self.name = name
        self.email = email
        self.phone = phone
        self.address = address
        self.website = website

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
            else:
                result[attr] = value
        if issubclass(IncomeReportStreamsMerchant, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, IncomeReportStreamsMerchant):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| |
from collections import namedtuple
import re
from ._compat import string_types
from .exceptions import UnexpectedTokenError, TakeSyntaxError
from .scanner import TokenType
from .utils import split_name, get_via_name_list, save_to_name_list
# Matches any run of whitespace; used by the `shrink` directive to collapse it.
_WS = re.compile(r'\s+')
class _SaveNode(namedtuple('_SaveNode', 'ident_parts')):
    """AST node for the `save`/`:` directive: stores the context's current
    value under the (possibly dotted) name given by ``ident_parts``."""
    __slots__ = ()

    def do(self, context):
        # Write context.value into context.rv at the path ident_parts.
        save_to_name_list(context.rv, self.ident_parts, context.value)
def make_save(parser):
    """Parse a `save` directive and build its AST node.

    Expects exactly one body item (the destination name) followed by the
    end-of-statement token; returns ``(None, node)`` since `save` consumes
    no trailing context token.
    """
    name_tok = parser.next_tok()
    if name_tok.type_ != TokenType.DirectiveBodyItem:
        raise UnexpectedTokenError(name_tok.type_, TokenType.DirectiveBodyItem)
    dest_parts = split_name(name_tok.content.strip())
    end_tok = parser.next_tok()
    # expecting only one parameter
    if end_tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(end_tok.type_, TokenType.DirectiveStatementEnd, token=end_tok)
    return None, _SaveNode(dest_parts)
class _SaveEachNode(namedtuple('_SaveEachNode', 'ident_parts sub_ctx_node')):
    """AST node for `save each`: saves a list under ``ident_parts`` and runs
    the sub-context once per item of the matched value, appending each
    sub-result dict to that list."""
    __slots__ = ()

    def do(self, context):
        results = []
        save_to_name_list(context.rv, self.ident_parts, results)
        for item in context.value:
            rv = {}
            results.append(rv)
            # Sub-context call signature appears to be
            # (parent_context, rv, value, last_value) — TODO confirm in parser.
            self.sub_ctx_node.do(None, rv, item, item)
def make_save_each(parser):
    """Parse a `save each` directive: a single destination name, then a
    nested context applied to every item of the matched value."""
    body_tok = parser.next_tok()
    if body_tok.type_ != TokenType.DirectiveBodyItem:
        raise UnexpectedTokenError(body_tok.type_, TokenType.DirectiveBodyItem)
    dest_parts = split_name(body_tok.content.strip())
    end_tok = parser.next_tok()
    # only a single parameter is accepted
    if end_tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(end_tok.type_, TokenType.DirectiveStatementEnd, token=end_tok)
    # the next token must open the sub-context this node will manage
    ctx_tok = parser.next_tok()
    if ctx_tok.type_ != TokenType.Context:
        raise UnexpectedTokenError(ctx_tok.type_, TokenType.Context, token=ctx_tok)
    if ctx_tok.end <= parser.depth:
        raise TakeSyntaxError('Invalid depth, expecting to start a "save each" context.',
                              extra=ctx_tok)
    sub_parser = parser.spawn_context_parser()
    sub_node, next_tok = sub_parser.parse()
    sub_parser.destroy()
    return next_tok, _SaveEachNode(dest_parts, sub_node)
class _NamespaceNode(namedtuple('_NamespaceNode', 'ident_parts sub_ctx_node')):
    """AST node for `namespace`/`+`: runs its sub-context with the result
    dict pointed at a nested dict stored under ``ident_parts``."""
    __slots__ = ()

    def do(self, context):
        # re-use the namespace if it was already defined earlier in the doc
        sub_rv = get_via_name_list(context.rv, self.ident_parts)
        if not sub_rv:
            # NOTE(review): an existing-but-empty namespace dict is replaced
            # with a fresh one here; harmless since both are empty.
            sub_rv = {}
            save_to_name_list(context.rv, self.ident_parts, sub_rv)
        self.sub_ctx_node.do(None, sub_rv, context.value, context.value)
def make_namespace(parser):
    """Parse a `namespace` (`+`) directive: a single destination name, then
    a nested context whose saves land inside that namespace."""
    body_tok = parser.next_tok()
    if body_tok.type_ != TokenType.DirectiveBodyItem:
        raise UnexpectedTokenError(body_tok.type_, TokenType.DirectiveBodyItem)
    ns_parts = split_name(body_tok.content.strip())
    end_tok = parser.next_tok()
    # only a single parameter is accepted
    if end_tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(end_tok.type_, TokenType.DirectiveStatementEnd, token=end_tok)
    # the next token must open the sub-context this node will manage
    ctx_tok = parser.next_tok()
    if ctx_tok.type_ != TokenType.Context:
        raise UnexpectedTokenError(ctx_tok.type_, TokenType.Context, token=ctx_tok)
    if ctx_tok.end <= parser.depth:
        raise TakeSyntaxError('Invalid depth, expecting to start a "namespace" context.',
                              extra=ctx_tok)
    sub_parser = parser.spawn_context_parser()
    sub_node, next_tok = sub_parser.parse()
    sub_parser.destroy()
    return next_tok, _NamespaceNode(ns_parts, sub_node)
class _DefSubroutine(namedtuple('_DefSubroutine', 'sub_ctx_node')):
    """AST node for an invocable `def` subroutine: runs its saved
    sub-context into a fresh dict and makes that dict the current
    last_value."""
    __slots__ = ()

    def do(self, context):
        rv = {}
        self.sub_ctx_node.do(None, rv, context.value, context.value)
        context.last_value = rv
def make_def_subroutine(parser):
    """Parse a `def` directive, registering a named subroutine in
    ``parser.defs``; the name may span several body items, joined with
    spaces."""
    parts = []
    tok = parser.next_tok()
    # the subroutine name may span more than one body item
    while tok.type_ == TokenType.DirectiveBodyItem:
        parts.append(tok.content.strip())
        tok = parser.next_tok()
    if not parts:
        raise TakeSyntaxError('The def directive requires a parameter.', tok)
    def_name = ' '.join(parts)
    if tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(tok.type_, TokenType.DirectiveStatementEnd, token=tok)
    # the next token must open the subroutine's sub-context
    ctx_tok = parser.next_tok()
    if ctx_tok.type_ != TokenType.Context:
        raise UnexpectedTokenError(ctx_tok.type_, TokenType.Context, token=ctx_tok)
    if ctx_tok.end <= parser.depth:
        raise TakeSyntaxError('Invalid depth, expecting to start a "def" subroutine context.',
                              extra=ctx_tok)
    sub_parser = parser.spawn_context_parser()
    sub_node, next_tok = sub_parser.parse()
    sub_parser.destroy()
    # defs produce no AST node; they only register the subroutine
    parser.defs[def_name] = _DefSubroutine(sub_node)
    return next_tok, None
class _MergeNode(namedtuple('_MergeNode', 'names_to_save save_all')):
    """AST node for `merge`/`>>`: copies keys from the sub-result
    (context.value) into the enclosing result dict; with ``save_all`` set
    it performs a shallow dict merge instead of copying named keys."""
    __slots__ = ()

    def do(self, context):
        if self.save_all:
            # do a shallow merge
            context.rv.update(context.value)
        else:
            src = context.value
            dest = context.rv
            for name_parts in self.names_to_save:
                val = get_via_name_list(src, name_parts)
                save_to_name_list(dest, name_parts, val)
def make_merge(parser):
    """Parse a `merge` (`>>`) directive.

    Accepts either a list of names to copy from the sub-result into the
    current result dict, or the single parameter ``*`` to shallow-merge
    everything.
    """
    names_to_save = []
    tok = parser.next_tok()
    # can have more than one name to merge
    while tok.type_ == TokenType.DirectiveBodyItem:
        id_parts = split_name(tok.content.strip())
        names_to_save.append(id_parts)
        tok = parser.next_tok()
    if not names_to_save:
        raise TakeSyntaxError('The merge directive requires a parameter, "*" '
                              'to save all list of keys',
                              tok)
    if tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(tok.type_, TokenType.DirectiveStatementEnd, token=tok)
    # Fix: renamed `all` -> `save_all` (was shadowing the builtin `all`).
    # NOTE(review): the "*" detection assumes split_name returns a tuple of
    # parts — confirm against .utils.split_name.
    if names_to_save == [('*',)]:
        save_all = True
        names_to_save = None
    else:
        save_all = False
        names_to_save = tuple(names_to_save)
    return None, _MergeNode(names_to_save, save_all)
class _ShrinkNode(object):
    """AST node for `shrink`: strips the current value's text and collapses
    internal whitespace runs to single spaces."""
    __slots__ = ()

    def do(self, context):
        raw = context.value
        # non-strings are assumed to expose their text via .text()
        text = raw if isinstance(raw, string_types) else raw.text()
        context.last_value = _WS.sub(' ', text.strip())
def make_shrink(parser):
    """Parse a `shrink` directive; it takes no parameters."""
    end_tok = parser.next_tok()
    if end_tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(end_tok.type_, TokenType.DirectiveStatementEnd, token=end_tok)
    return None, _ShrinkNode()
class _SetAccessorLastValueContext(object):
    """AST node used inside custom accessors: exposes the accessor's
    current value to the caller by storing it under the reserved
    ``__last_value__`` key (read back by ``_CustomAccessor``).

    Fix: previously subclassed ``tuple`` — almost certainly a copy/paste
    slip, since the node carries no fields; ``object`` matches the other
    field-less node ``_ShrinkNode``.
    """
    __slots__ = ()

    def do(self, context):
        context.rv['__last_value__'] = context.value
def make_set_accessor_context(parser):
    """Parse a `set context` directive; it takes no parameters."""
    end_tok = parser.next_tok()
    if end_tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(end_tok.type_, TokenType.DirectiveStatementEnd, token=end_tok)
    return None, _SetAccessorLastValueContext()
class _CustomAccessor(namedtuple('_CustomAccessor', 'sub_ctx_node')):
    """AST node for a user-defined accessor: runs its saved sub-context
    into a scratch dict and surfaces whatever that sub-context stored under
    the reserved ``__last_value__`` key; if nothing was stored, last_value
    is left unchanged."""
    __slots__ = ()

    def do(self, context):
        rv = {}
        self.sub_ctx_node.do(None, rv, context.value, context.value)
        context.last_value = rv.get('__last_value__', context.last_value)
def make_custom_accessor(parser):
    """Parse an `accessor` directive, registering a named custom accessor
    in ``parser.defs``; the name may span several body items, joined with
    spaces.
    """
    name_parts = []
    tok = parser.next_tok()
    # the accessor name may span more than one body item
    while tok.type_ == TokenType.DirectiveBodyItem:
        name_parts.append(tok.content.strip())
        tok = parser.next_tok()
    if not name_parts:
        # Fix: message previously said "def" (copied from
        # make_def_subroutine), misreporting which directive failed.
        raise TakeSyntaxError('The accessor directive requires a parameter.', tok)
    accessor_name = ' '.join(name_parts)
    if tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(tok.type_, TokenType.DirectiveStatementEnd, token=tok)
    # consume the context token which should be a sub-context
    tok = parser.next_tok()
    if tok.type_ != TokenType.Context:
        raise UnexpectedTokenError(tok.type_, TokenType.Context, token=tok)
    if tok.end <= parser.depth:
        # Fix: message previously said '"def" subroutine context'.
        raise TakeSyntaxError('Invalid depth, expecting to start an "accessor" context.',
                              extra=tok)
    # parse the sub-context _CustomAccessor will manage
    sub_ctx = parser.spawn_context_parser()
    sub_ctx_node, tok = sub_ctx.parse()
    sub_ctx.destroy()
    subroutine = _CustomAccessor(sub_ctx_node)
    parser.defs[accessor_name] = subroutine
    return tok, None
class _RxMatchNode(namedtuple('_RxMatchNode', 'sub_ctx_node')):
    """AST node for `rx match`: context.value is a (compiled_regex, text)
    pair; on a match the sub-context runs with a tuple of
    (whole match, group 1, group 2, ...) as its value."""
    __slots__ = ()

    def do(self, context):
        pattern, text = context.value
        match = pattern.search(text)
        if match is None:
            # no match: skip the sub-context entirely
            return
        groups = (match.group(0),) + match.groups()
        self.sub_ctx_node.do(None, context.rv, groups, groups)
def make_rx_match(parser):
    """Parse an `rx match` directive: no parameters, then a nested context
    that runs only when the regex matches."""
    end_tok = parser.next_tok()
    # no parameters are accepted
    if end_tok.type_ != TokenType.DirectiveStatementEnd:
        raise UnexpectedTokenError(end_tok.type_, TokenType.DirectiveStatementEnd, token=end_tok)
    # the next token must open the sub-context this node will manage
    ctx_tok = parser.next_tok()
    if ctx_tok.type_ != TokenType.Context:
        raise UnexpectedTokenError(ctx_tok.type_, TokenType.Context, token=ctx_tok)
    if ctx_tok.end <= parser.depth:
        raise TakeSyntaxError('Invalid depth, expecting to start a "rx match" context.',
                              extra=ctx_tok)
    sub_parser = parser.spawn_context_parser()
    sub_node, next_tok = sub_parser.parse()
    sub_parser.destroy()
    return next_tok, _RxMatchNode(sub_node)
# Registry mapping directive keywords (and their shorthand aliases) to the
# parser functions that build their AST nodes.
BUILTIN_DIRECTIVES = {
    'save': make_save,
    ':': make_save,
    'save each': make_save_each,
    'namespace': make_namespace,
    '+': make_namespace,
    'def': make_def_subroutine,
    'merge': make_merge,
    '>>': make_merge,
    'shrink': make_shrink,
    'set context': make_set_accessor_context,
    'accessor': make_custom_accessor,
    'rx match': make_rx_match,
}
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Content Management System Model
@copyright: 2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module: the model class and the rheader helper.
__all__ = ["S3ContentModel",
           "cms_rheader",
           ]
from gluon import *
from gluon.storage import Storage
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from ..s3 import *
# =============================================================================
class S3ContentModel(S3Model):
    """
        Content Management System
    """

    # Tables defined by this model (used by the S3 lazy-loading registry)
    names = ["cms_series",
             "cms_post",
             "cms_comment",
             ]

    def model(self):
        """
            Define the CMS tables: Series, Posts and threaded Comments.
        """

        T = current.T
        db = current.db
        s3 = current.response.s3

        # Local aliases for frequently-used helpers
        add_component = self.add_component
        comments = s3.comments
        configure = self.configure
        crud_strings = s3.crud_strings
        define_table = self.define_table
        meta_fields = s3.meta_fields
        roles_permitted = s3.roles_permitted

        # ---------------------------------------------------------------------
        # Series
        # - lists of Posts displaying in recent-first mode
        #
        tablename = "cms_series"
        table = define_table(tablename,
                             Field("name",
                                   notnull=True,
                                   label = T("Name")),
                             Field("avatar", "boolean",
                                   default = False,
                                   label=T("Show author picture?")),
                             Field("replies", "boolean",
                                   default = False,
                                   label=T("Comments permitted?")),
                             comments(),
                             # Multiple Roles (@ToDo: Implement the restriction)
                             roles_permitted(
                                 readable = False,
                                 writable = False
                             ),
                             *meta_fields())

        # CRUD Strings
        ADD_SERIES = T("Add Series")
        LIST_SERIES = T("List Series")
        crud_strings[tablename] = Storage(
            title_create = ADD_SERIES,
            title_display = T("Series Details"),
            title_list = LIST_SERIES,
            title_update = T("Edit Series"),
            title_search = T("Search Series"),
            title_upload = T("Import Series"),
            subtitle_create = T("Add New Series"),
            subtitle_list = T("Series"),
            label_list_button = LIST_SERIES,
            label_create_button = ADD_SERIES,
            msg_record_created = T("Series added"),
            msg_record_modified = T("Series updated"),
            msg_record_deleted = T("Series deleted"),
            msg_list_empty = T("No series currently defined"))

        # Reusable field (hidden by default; used as FK from cms_post)
        series_id = S3ReusableField("series_id", db.cms_series,
                                    readable=False,
                                    writable=False,
                                    requires = IS_NULL_OR(IS_ONE_OF(db, "cms_series.id", "%(name)s")),
                                    ondelete = "CASCADE")

        # Resource Configuration
        configure(tablename,
                  onaccept = self.series_onaccept,
                  create_next=URL(f="series", args=["[id]", "post"]))

        # Components
        add_component("cms_post", cms_series="series_id")

        # ---------------------------------------------------------------------
        # Posts
        # - single blocks of rich text which can be embedded into a page,
        #   be viewed as full pages or as part of a Series
        #
        # Build the module choices, skipping purely-internal modules
        modules = {}
        _modules = current.deployment_settings.modules
        for module in _modules:
            if module in ["appadmin", "errors", "sync"]:
                continue
            modules[module] = _modules[module].name_nice

        tablename = "cms_post"
        table = define_table(tablename,
                             series_id(),
                             Field("module",
                                   requires = IS_NULL_OR(IS_IN_SET_LAZY(lambda: \
                                        sort_dict_by_values(modules))),
                                   comment = T("If you specify a module then this will be used as the text in that module's index page"),
                                   label = T("Module")),
                             Field("name",
                                   notnull=True,
                                   comment = T("This isn't visible to the published site, it's just to help the Admin identify the text"),
                                   label = T("Title")),
                             Field("body", "text",
                                   notnull=True,
                                   widget = s3_richtext_widget,
                                   label = T("Body")),
                             Field("avatar", "boolean",
                                   default = False,
                                   label=T("Show author picture?")),
                             Field("replies", "boolean",
                                   default = False,
                                   label=T("Comments permitted?")),
                             #Field("published", "boolean",
                             #      default = True,
                             #      label=T("Published")),
                             comments(),
                             # Multiple Roles (@ToDo: Implement the restriction)
                             roles_permitted(
                                 readable = False,
                                 writable = False
                             ),
                             *meta_fields())

        # CRUD Strings
        ADD_POST = T("Add Post")
        LIST_POSTS = T("List Posts")
        crud_strings[tablename] = Storage(
            title_create = ADD_POST,
            title_display = T("Post Details"),
            title_list = LIST_POSTS,
            title_update = T("Edit Post"),
            title_search = T("Search Posts"),
            title_upload = T("Import Posts"),
            subtitle_create = T("Add New Post"),
            subtitle_list = T("Posts"),
            label_list_button = LIST_POSTS,
            label_create_button = ADD_POST,
            msg_record_created = T("Post added"),
            msg_record_modified = T("Post updated"),
            msg_record_deleted = T("Post deleted"),
            msg_list_empty = T("No posts currently defined"))

        # Reusable field
        post_id = S3ReusableField("post_id", db.cms_post,
                                  label = T("Post"),
                                  sortby="name",
                                  requires = IS_NULL_OR(IS_ONE_OF(db, "cms_post.id", "%(name)s")),
                                  represent = lambda id, row=None: \
                                        (id and [db.cms_post[id].name] or [NONE])[0],
                                  comment = s3_popup_comment(c="cms",
                                                             f="post",
                                                             title=ADD_POST,
                                                             tooltip=T("A block of rich text which could be embedded into a page, viewed as a complete page or viewed as a list of news items.")),
                                  ondelete = "CASCADE")

        # Resource Configuration
        configure(tablename,
                  onaccept = self.post_onaccept)

        # Components
        add_component("cms_comment", cms_post="post_id")

        # ---------------------------------------------------------------------
        # Comments
        # - threaded comments on Posts
        #
        # @ToDo: Attachments?
        #
        # Parent field allows us to:
        #  * easily filter for top-level threads
        #  * easily filter for next level of threading
        #  * hook a new reply into the correct location in the hierarchy
        #
        tablename = "cms_comment"
        table = define_table(tablename,
                             Field("parent", "reference cms_comment",
                                   requires = IS_EMPTY_OR(IS_ONE_OF(db,
                                                            "cms_comment.id")),
                                   readable=False),
                             post_id(),
                             Field("body", "text",
                                   notnull=True,
                                   label = T("Comment")),
                             *meta_fields())

        # Resource Configuration
        configure(tablename,
                  list_fields=["id",
                               "post_id",
                               "created_by",
                               "modified_on"
                               ])

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (response.s3.*)
        #
        return Storage()

    # -------------------------------------------------------------------------
    @staticmethod
    def series_onaccept(form):
        """
            cascade values down to all component Posts

            @param form: the FORM whose vars hold the accepted series record
        """

        vars = form.vars
        table = current.s3db.cms_post
        query = (table.series_id == vars.id)
        current.db(query).update(avatar = vars.avatar,
                                 replies = vars.replies,
                                 roles_permitted = vars.roles_permitted,
                                 )
        return

    # -------------------------------------------------------------------------
    @staticmethod
    def post_onaccept(form):
        """
            Ensure that at most one Post is linked to any given module
            (the Post used as that module's index-page text)
        """

        vars = form.vars
        module = vars.get("module", None)
        if module:
            # Ensure that no other record is set as the one for this module
            table = current.s3db.cms_post
            query = (table.module == module) & \
                    (table.id != vars.id)
            current.db(query).update(module=None)
        return
# =============================================================================
def cms_rheader(r, tabs=[]):
    """ CMS Resource Headers

        @param r: the S3Request
        @param tabs: ignored — tabs are rebuilt per resource type below
        @return: the rheader DIV, or None when no rheader applies
    """

    if r.representation != "html":
        # RHeaders only used in interactive views
        return None
    record = r.record
    if record is None:
        # List or Create form: rheader makes no sense here
        return None

    table = r.table
    resourcename = r.name
    T = current.T

    # Fix: rheader was previously unbound (UnboundLocalError) for any
    # resource other than "series"/"post"; default to None instead.
    rheader = None

    if resourcename == "series":
        # Tabs
        tabs = [(T("Basic Details"), None),
                (T("Posts"), "post"),
                ]
        rheader_tabs = s3_rheader_tabs(r, tabs)

        rheader = DIV(TABLE(
                        TR(
                           TH("%s: " % table.name.label),
                           record.name
                          ),
                        ), rheader_tabs)

    elif resourcename == "post":
        # Tabs
        tabs = [(T("Basic Details"), None),
                ]
        if record.replies:
            # Comments tab only when replies are enabled on the Post
            tabs.append((T("Comments"), "discuss"))
        rheader_tabs = s3_rheader_tabs(r, tabs)

        rheader = DIV(TABLE(
                        TR(
                           TH("%s: " % table.name.label),
                           record.name
                          ),
                        ), rheader_tabs)

    return rheader
| |
from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
class TestVariablePlayer(MpfFakeGameTestCase):
    """Integration tests for the variable_player machine config:
    player/machine variable scoring, mode blocking and scoring-event
    kwargs."""

    def get_config_file(self):
        # Machine config consumed by the fake-game test harness
        return 'config.yaml'

    def get_machine_path(self):
        return 'tests/machine_files/variable_player/'

    def test_variable_player(self):
        """Scoring across modes, machine vars and player switches."""
        # start game with two players
        self.start_two_player_game()

        self.assertFalse(self.machine.mode_controller.is_active('mode1'))

        # no mode is running yet, so the scoring event does nothing
        self.post_event("test_event1")
        self.assertEqual(0, self.machine.game.player.score)
        self.assertEqual(0, self.machine.game.player.var_c)

        # start mode 1
        self.post_event('start_mode1')
        self.assertTrue(self.machine.mode_controller.is_active('mode1'))

        # machine vars: add, then set, then add again
        self.post_event("test_add_machine_var")
        self.machine_run()
        self.assertMachineVarEqual(23, "my_var")

        self.post_event("test_set_machine_var")
        self.machine_run()
        self.assertMachineVarEqual(100, "my_var")

        self.post_event("test_add_machine_var")
        self.machine_run()
        self.assertMachineVarEqual(123, "my_var")

        # test setting string
        self.post_event('test_set_string')
        self.assertEqual('HELLO', self.machine.game.player.string_test)

        # event should score 100 now
        self.post_event("test_event1")
        self.assertEqual(100, self.machine.game.player.score)
        self.assertEqual(1, self.machine.game.player.vars['var_a'])
        self.assertEqual(0, self.machine.game.player.var_c)

        self.machine.game.player.ramps = 3
        self.assertMachineVarEqual(100, "my_var2")

        self.post_event("test_event1")
        self.assertEqual(200, self.machine.game.player.score)
        self.assertEqual(2, self.machine.game.player.vars['var_a'])
        self.assertEqual(3, self.machine.game.player.var_c)
        self.assertMachineVarEqual(200, "my_var2")

        # set actions overwrite rather than accumulate
        self.post_event("test_set_100")
        self.assertEqual(100, self.machine.game.player.test1)
        self.post_event("test_set_200")
        self.assertEqual(200, self.machine.game.player.test1)
        self.post_event("test_set_100")
        self.assertEqual(100, self.machine.game.player.test1)

        # start mode 2
        self.post_event('start_mode2')
        self.assertTrue(self.machine.mode_controller.is_active('mode2'))

        # event should score 1000 now (and block the 100 from mode1)
        self.post_event("test_event1")
        self.assertEqual(1200, self.machine.game.player.score)
        # var_a is blocked
        self.assertEqual(2, self.machine.game.player.vars['var_a'])
        # but we count var_b
        self.assertEqual(1, self.machine.game.player.vars['var_b'])
        self.assertEqual(33, self.machine.game.player.var_c)

        # switch players
        self.drain_all_balls()
        self.assertEqual(2, self.machine.game.player.number)
        self.assertEqual(0, self.machine.game.player.score)

        # modes should be unloaded
        self.assertFalse(self.machine.mode_controller.is_active('mode1'))
        self.assertFalse(self.machine.mode_controller.is_active('mode2'))

        # mode is unloaded. should not score
        self.post_event("test_event1")
        self.assertEqual(0, self.machine.game.player.score)

        # load mode two
        self.post_event('start_mode2')
        self.assertTrue(self.machine.mode_controller.is_active('mode2'))

        self.post_event("test_event1")
        self.assertEqual(1000, self.machine.game.player.score)
        # var_a is 0
        self.assertEqual(0, self.machine.game.player.var_a)
        # but we count var_b
        self.assertEqual(1, self.machine.game.player.vars['var_b'])

        # switch players again
        self.drain_all_balls()
        self.assertEqual(1, self.machine.game.player.number)

        # mode2 should auto start
        self.assertFalse(self.machine.mode_controller.is_active('mode1'))
        self.assertTrue(self.machine.mode_controller.is_active('mode2'))
        self.assertTrue(self.machine.modes["mode2"].active)

        # same score as during last ball
        self.assertEqual(1200, self.machine.game.player.score)
        self.assertEqual(2, self.machine.game.player.vars['var_a'])
        self.assertEqual(1, self.machine.game.player.vars['var_b'])

        # should still score 1000 points
        self.post_event("test_event1")
        self.assertEqual(2200, self.machine.game.player.score)
        self.assertEqual(2, self.machine.game.player.vars['var_a'])
        self.assertEqual(2, self.machine.game.player.vars['var_b'])

        # mode3 scores onto specific players by number
        self.post_event("start_mode3")
        self.advance_time_and_run()
        self.assertPlayerVarEqual(2200, "score")
        self.assertEqual(1000, self.machine.game.player_list[1].score)
        self.post_event("score_player2")
        self.assertPlayerVarEqual(2200, "score")
        self.assertEqual(1023, self.machine.game.player_list[1].score)
        self.post_event("score_player1")
        self.assertPlayerVarEqual(2242, "score")
        self.assertEqual(1023, self.machine.game.player_list[1].score)
        self.post_event("reset_player2")
        self.assertPlayerVarEqual(2242, "score")
        self.assertEqual(10, self.machine.game.player_list[1].score)

        # float values and float multipliers
        self.post_event("score_float2")
        self.assertPlayerVarEqual(2244, "score")

        self.post_event("set_float")
        self.assertPlayerVarEqual(1.5, "multiplier")
        self.post_event("score_float3")
        self.assertPlayerVarEqual(2394, "score")

        # should not crash (references a non-existing player 7)
        self.post_event("set_player7")
        self.post_event("add_player7")

        # stop game and mode
        self.machine.service.start_service()
        self.advance_time_and_run()

        # it should not crash
        self.post_event("test_event1")
        self.advance_time_and_run()

    def test_blocking(self):
        """Mode priority blocking and player_score machine vars."""
        self.machine.variables.set_machine_var("player1_score", 42)
        self.machine.variables.set_machine_var("player2_score", 23)
        # start game
        self.start_game()
        # start mode 1
        self.post_event("start_mode1", 1)

        # test scoring
        self.post_event("test_score_mode", 1)
        # should score 100
        self.assertPlayerVarEqual(100, "score")

        # start mode 2
        self.post_event("start_mode2", 1)

        # test scoring
        self.post_event("test_score_mode", 1)
        # should score 1000 (+ 100 from the previous)
        self.assertPlayerVarEqual(1100, "score")

        self.post_event("stop_mode2", 1)

        # test scoring
        self.post_event("test_score_mode", 1)
        # should score 100 again (+ 1100 from the previous)
        self.assertPlayerVarEqual(1200, "score")

        self.post_event("stop_mode1")

        # we still see the old score here
        self.assertMachineVarEqual(42, "player1_score")

        self.stop_game()
        # player scores are published as machine vars at game end
        self.assertMachineVarEqual(1200, "player1_score")
        self.assertFalse(self.machine.variables.is_machine_var("player2_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player3_score"))
        self.assertFalse(self.machine.variables.is_machine_var("player4_score"))

    def test_blocking_multiple_with_logic_block(self):
        """Blocking interaction with a logic block; adapted from a real
        game."""
        # start game
        self.start_game()
        # start mode 1
        self.post_event("start_mode1", 1)

        # hit target 3 times for 10 points each
        # and complete the logic block
        for x in range(0, 3):
            self.hit_and_release_switch("s_counter_target")
        self.assertPlayerVarEqual(30, "score")

        # start mode_for_logic_block
        self.post_event("counter_target_complete")

        # both modes running now
        self.assertModeRunning('mode_for_logic_block')
        self.assertModeRunning('mode1')

        # hit the target while in the new mode
        self.hit_and_release_switch("s_counter_target")
        self.assertPlayerVarEqual(130, "score")

        # trigger the end of the counter_targer mode
        # which also blocks its mode1 scoring this once
        self.hit_and_release_switch("s_kills_counter_target")
        self.assertPlayerVarEqual(630, "score")

        # only mode1 running now
        self.assertModeNotRunning('mode_for_logic_block')
        self.assertModeRunning('mode1')

        # target only scores 10 again... or does it???
        self.hit_and_release_switch("s_counter_target")
        self.assertPlayerVarEqual(640, "score")

    def test_non_game_mode(self):
        """variable_player actions on machine vars outside of a game."""
        # start non game mode outside of game
        self.post_event("start_non_game_mode")
        self.machine.variables.set_machine_var("test", "321")
        self.machine.variables.set_machine_var("test2", 3)
        self.post_event("test_event")
        self.assertMachineVarEqual("123", "test")
        self.assertMachineVarEqual(10, "test2")

        # test subscription
        self.machine.variables.set_machine_var("test5", "123")
        self.advance_time_and_run(.1)
        self.assertMachineVarEqual("123-suffix", "test6")

    def test_event_kwargs(self):
        """player_score events carry value/prev_value/change/source kwargs."""
        self.start_game()
        self.post_event('start_mode1')
        self.assertTrue(self.machine.mode_controller.is_active('mode1'))
        self.post_event('start_mode3')
        self.assertTrue(self.machine.mode_controller.is_active('mode3'))

        self.mock_event('player_score')
        self.post_event('test_event1')
        self.advance_time_and_run()
        self.assertEventCalledWith('player_score',
                                   value=100,
                                   prev_value=0,
                                   change=100,
                                   player_num=1,
                                   source='mode1')

        self.post_event('score_player1')
        self.advance_time_and_run()
        self.assertEventCalledWith('player_score',
                                   value=142,
                                   prev_value=100,
                                   change=42,
                                   player_num=1,
                                   source='mode3')
| |
"""
RED Log Encodings
=================
Defines the *RED* log encodings:
- :func:`colour.models.log_encoding_REDLog`
- :func:`colour.models.log_decoding_REDLog`
- :func:`colour.models.log_encoding_REDLogFilm`
- :func:`colour.models.log_decoding_REDLogFilm`
- :func:`colour.models.log_encoding_Log3G10_v1`
- :func:`colour.models.log_decoding_Log3G10_v1`
- :func:`colour.models.log_encoding_Log3G10_v2`
- :func:`colour.models.log_decoding_Log3G10_v2`
- :func:`colour.models.log_encoding_Log3G10_v3`
- :func:`colour.models.log_decoding_Log3G10_v3`
- :attr:`colour.models.LOG3G10_ENCODING_METHODS`
- :func:`colour.models.log_encoding_Log3G10`
- :attr:`colour.models.LOG3G10_DECODING_METHODS`
- :func:`colour.models.log_decoding_Log3G10`
- :func:`colour.models.log_encoding_Log3G12`
- :func:`colour.models.log_decoding_Log3G12`
References
----------
- :cite:`Nattress2016a` : Nattress, G. (2016). Private Discussion with Shaw,
N.
- :cite:`REDDigitalCinema2017` : RED Digital Cinema. (2017). White Paper on
REDWideGamutRGB and Log3G10. Retrieved January 16, 2021, from
https://www.red.com/download/white-paper-on-redwidegamutrgb-and-log3g10
- :cite:`SonyImageworks2012a` : Sony Imageworks. (2012). make.py. Retrieved
November 27, 2014, from
https://github.com/imageworks/OpenColorIO-Configs/blob/master/\
nuke-default/make.py
"""
from __future__ import annotations
import numpy as np
from colour.hints import (
FloatingOrArrayLike,
FloatingOrNDArray,
Literal,
Union,
)
from colour.models.rgb.transfer_functions import (
log_encoding_Cineon,
log_decoding_Cineon,
)
from colour.utilities import (
CaseInsensitiveMapping,
as_float,
as_float_array,
from_range_1,
to_domain_1,
validate_method,
)
# Standard colour-science module metadata.
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"

# Public API: the RED encodings/decodings and their method registries.
__all__ = [
    "log_encoding_REDLog",
    "log_decoding_REDLog",
    "log_encoding_REDLogFilm",
    "log_decoding_REDLogFilm",
    "log_encoding_Log3G10_v1",
    "log_decoding_Log3G10_v1",
    "log_encoding_Log3G10_v2",
    "log_decoding_Log3G10_v2",
    "log_encoding_Log3G10_v3",
    "log_decoding_Log3G10_v3",
    "LOG3G10_ENCODING_METHODS",
    "log_encoding_Log3G10",
    "LOG3G10_DECODING_METHODS",
    "log_decoding_Log3G10",
    "log_encoding_Log3G12",
    "log_decoding_Log3G12",
]
def log_encoding_REDLog(
    x: FloatingOrArrayLike,
    black_offset: FloatingOrArrayLike = 10 ** ((0 - 1023) / 511),
) -> FloatingOrNDArray:
    """
    Define the *REDLog* log encoding curve / opto-electronic transfer
    function.

    Parameters
    ----------
    x
        Linear data :math:`x`, in [0, 1] under both reference and 1 scales.
    black_offset
        Black offset of the encoding curve.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`y`, in [0, 1] under both scales.

    References
    ----------
    :cite:`SonyImageworks2012a`

    Examples
    --------
    >>> log_encoding_REDLog(0.18)  # doctest: +ELLIPSIS
    0.6376218...
    """

    linear = to_domain_1(x)
    offset = as_float_array(black_offset)

    # 10-bit Cineon-style log curve, normalised back to [0, 1].
    code = 1023 + 511 * np.log10(linear * (1 - offset) + offset)
    encoded = code / 1023

    return as_float(from_range_1(encoded))
def log_decoding_REDLog(
    y: FloatingOrArrayLike,
    black_offset: FloatingOrArrayLike = 10 ** ((0 - 1023) / 511),
) -> FloatingOrNDArray:
    """
    Define the *REDLog* log decoding curve / electro-optical transfer
    function, the inverse of :func:`log_encoding_REDLog`.

    Parameters
    ----------
    y
        Non-linear data :math:`y`, in [0, 1] under both reference and 1
        scales.
    black_offset
        Black offset of the encoding curve.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear data :math:`x`, in [0, 1] under both scales.

    References
    ----------
    :cite:`SonyImageworks2012a`

    Examples
    --------
    >>> log_decoding_REDLog(0.637621845988175)  # doctest: +ELLIPSIS
    0.1...
    """

    encoded = to_domain_1(y)
    offset = as_float_array(black_offset)

    # Invert the 10-bit log curve, then remove the black offset.
    code = 1023 * encoded
    linear = (10 ** ((code - 1023) / 511) - offset) / (1 - offset)

    return as_float(from_range_1(linear))
def log_encoding_REDLogFilm(
    x: FloatingOrArrayLike,
    black_offset: FloatingOrArrayLike = 10 ** ((95 - 685) / 300),
) -> FloatingOrNDArray:
    """
    Define the *REDLogFilm* log encoding curve / opto-electronic transfer
    function.

    Parameters
    ----------
    x
        Linear data :math:`x`, in [0, 1] under both reference and 1 scales.
    black_offset
        Black offset of the encoding curve.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`y`, in [0, 1] under both scales.

    References
    ----------
    :cite:`SonyImageworks2012a`

    Examples
    --------
    >>> log_encoding_REDLogFilm(0.18)  # doctest: +ELLIPSIS
    0.4573196...
    """

    # REDLogFilm is the Cineon encoding with a REDLogFilm black offset.
    return log_encoding_Cineon(x, black_offset)
def log_decoding_REDLogFilm(
    y: FloatingOrArrayLike,
    black_offset: FloatingOrArrayLike = 10 ** ((95 - 685) / 300),
) -> FloatingOrNDArray:
    """
    Define the *REDLogFilm* log decoding curve / electro-optical transfer
    function.

    The *REDLogFilm* curve is identical to the *Cineon* curve, thus this
    definition simply delegates to :func:`log_decoding_Cineon`.

    Parameters
    ----------
    y
        Non-linear data :math:`y`.
    black_offset
        Black offset.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear data :math:`x`.

    Notes
    -----
    Both ``y`` and ``x`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`SonyImageworks2012a`

    Examples
    --------
    >>> log_decoding_REDLogFilm(0.457319613085418)  # doctest: +ELLIPSIS
    0.1799999...
    """

    return log_decoding_Cineon(y, black_offset)
def log_encoding_Log3G10_v1(x: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G10* *v1* log encoding curve / opto-electronic transfer
    function, the curve used in *REDCINE-X PRO Beta 42* and *Resolve 12.5.2*.

    Parameters
    ----------
    x
        Linear data :math:`x`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`y`.

    Notes
    -----
    Both ``x`` and ``y`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`Nattress2016a`

    Examples
    --------
    >>> log_encoding_Log3G10_v1(0.18)  # doctest: +ELLIPSIS
    0.3333336...
    """

    x = to_domain_1(x)

    # The curve is mirrored around the origin via the sign of the input.
    y = np.sign(x) * 0.222497 * np.log10(np.abs(x) * 169.379333 + 1)

    return as_float(from_range_1(y))
def log_decoding_Log3G10_v1(y: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G10* *v1* log decoding curve / electro-optical transfer
    function, the curve used in *REDCINE-X PRO Beta 42* and *Resolve 12.5.2*.

    Parameters
    ----------
    y
        Non-linear data :math:`y`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear data :math:`x`.

    Notes
    -----
    Both ``y`` and ``x`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`Nattress2016a`

    Examples
    --------
    >>> log_decoding_Log3G10_v1(1.0 / 3)  # doctest: +ELLIPSIS
    0.1799994...
    """

    y = to_domain_1(y)

    # Invert the log shaping on the magnitude, then restore the sign.
    magnitude = (10.0 ** (np.abs(y) / 0.222497) - 1) / 169.379333
    x = np.sign(y) * magnitude

    return as_float(from_range_1(x))
def log_encoding_Log3G10_v2(x: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G10* *v2* log encoding curve / opto-electronic transfer
    function, the current curve in *REDCINE-X PRO*.

    Parameters
    ----------
    x
        Linear data :math:`x`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`y`.

    Notes
    -----
    Both ``x`` and ``y`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`Nattress2016a`

    Examples
    --------
    >>> log_encoding_Log3G10_v2(0.0)  # doctest: +ELLIPSIS
    0.0915514...
    """

    x = to_domain_1(x)

    # The v2 curve offsets the input by +0.01 before the log shaping so
    # that zero linear input maps to a small positive code value.
    shifted = x + 0.01
    y = np.sign(shifted) * 0.224282 * np.log10(np.abs(shifted) * 155.975327 + 1)

    return as_float(from_range_1(y))
def log_decoding_Log3G10_v2(y: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G10* *v2* log decoding curve / electro-optical transfer
    function, the current curve in *REDCINE-X PRO*.

    Parameters
    ----------
    y
        Non-linear data :math:`y`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear data :math:`x`.

    Notes
    -----
    Both ``y`` and ``x`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`Nattress2016a`

    Examples
    --------
    >>> log_decoding_Log3G10_v2(1.0)  # doctest: +ELLIPSIS
    184.3223476...
    """

    y = to_domain_1(y)

    # Invert the log shaping, then remove the +0.01 encoding offset.
    x = (np.sign(y) * (10.0 ** (np.abs(y) / 0.224282) - 1) / 155.975327) - 0.01

    return as_float(from_range_1(x))
def log_encoding_Log3G10_v3(x: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G10* *v3* log encoding curve / opto-electronic transfer
    function, the curve described in the *RedLog3G10* Whitepaper.

    Parameters
    ----------
    x
        Linear data :math:`x`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`y`.

    Notes
    -----
    Both ``x`` and ``y`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`REDDigitalCinema2017`

    Examples
    --------
    >>> log_encoding_Log3G10_v3(0.0)  # doctest: +ELLIPSIS
    0.09155148...
    """

    # Offset the input by +0.01 so that zero maps to a positive code value.
    x = to_domain_1(x) + 0.01

    # Negative (offset) values take a linear segment of slope 15.1927;
    # non-negative values follow the logarithmic segment.
    y = np.where(
        x < 0.0,
        x * 15.1927,
        np.sign(x) * 0.224282 * np.log10(np.abs(x) * 155.975327 + 1.0),
    )

    return as_float(from_range_1(y))
def log_decoding_Log3G10_v3(y: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G10* *v3* log decoding curve / electro-optical transfer
    function, the curve described in the *RedLog3G10* whitepaper.

    Parameters
    ----------
    y
        Non-linear data :math:`y`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear data :math:`x`.

    Notes
    -----
    Both ``y`` and ``x`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`REDDigitalCinema2017`

    Examples
    --------
    >>> log_decoding_Log3G10_v3(1.0)  # doctest: +ELLIPSIS
    184.32234764...
    """

    y = to_domain_1(y)

    # Invert the linear segment for negative code values and the log
    # segment otherwise; both branches remove the +0.01 encoding offset.
    x = np.where(
        y < 0.0,
        (y / 15.1927) - 0.01,
        np.sign(y) * (10 ** (np.abs(y) / 0.224282) - 1.0) / 155.975327 - 0.01,
    )

    return as_float(from_range_1(x))
# Registry mapping method names ("v1" / "v2" / "v3") to the corresponding
# *Log3G10* encoding definitions; lookups are case-insensitive.
LOG3G10_ENCODING_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
    {
        "v1": log_encoding_Log3G10_v1,
        "v2": log_encoding_Log3G10_v2,
        "v3": log_encoding_Log3G10_v3,
    }
)
LOG3G10_ENCODING_METHODS.__doc__ = """
Supported *Log3G10* log encoding curve / opto-electronic transfer function
methods.
References
----------
:cite:`Nattress2016a`, :cite:`REDDigitalCinema2017`
"""
def log_encoding_Log3G10(
    x: FloatingOrArrayLike,
    method: Union[Literal["v1", "v2", "v3"], str] = "v3",
) -> FloatingOrNDArray:
    """
    Define the *Log3G10* log encoding curve / opto-electronic transfer
    function.

    Parameters
    ----------
    x
        Linear data :math:`x`.
    method
        Computation method.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`y`.

    Notes
    -----
    -   Both ``x`` and ``y`` are in domain / range [0, 1] under the
        *Reference* and *1* scales.
    -   The *Log3G10* *v1* log encoding curve is the one used in
        *REDCINE-X Beta 42*. *Resolve 12.5.2* also uses the *v1* curve.
        *RED* is planning to use the *Log3G10* *v2* log encoding curve in
        the release version of the *RED SDK*.
    -   The intent of the *Log3G10* *v1* log encoding curve is that zero
        maps to zero, 0.18 maps to 1/3, and 10 stops above 0.18 maps to
        1.0. The name indicates this in a similar way to the naming
        conventions of *Sony HyperGamma* curves. The constants used in the
        functions do not in fact quite hit these values, but rather than
        use corrected constants, the functions here use the official *RED*
        values, in order to match the output of the *RED SDK*.
        For those interested, solving for constants which exactly hit 1/3
        and 1.0 yields the following values::

            B = 25 * (np.sqrt(4093.0) - 3) / 9
            A = 1 / np.log10(B * 184.32 + 1)

        where the function takes the form::

            Log3G10(x) = A * np.log10(B * x + 1)

        Similarly for *Log3G12*, the values which hit exactly 1/3 and 1.0
        are::

            B = 25 * (np.sqrt(16381.0) - 3) / 9
            A = 1 / np.log10(B * 737.28 + 1)

    References
    ----------
    :cite:`Nattress2016a`, :cite:`REDDigitalCinema2017`

    Examples
    --------
    >>> log_encoding_Log3G10(0.0)  # doctest: +ELLIPSIS
    0.09155148...
    >>> log_encoding_Log3G10(0.18, method='v1')  # doctest: +ELLIPSIS
    0.3333336...
    """

    method = validate_method(method, LOG3G10_ENCODING_METHODS)

    encoder = LOG3G10_ENCODING_METHODS[method]

    return encoder(x)
# Registry mapping method names ("v1" / "v2" / "v3") to the corresponding
# *Log3G10* decoding definitions; lookups are case-insensitive.
LOG3G10_DECODING_METHODS: CaseInsensitiveMapping = CaseInsensitiveMapping(
    {
        "v1": log_decoding_Log3G10_v1,
        "v2": log_decoding_Log3G10_v2,
        "v3": log_decoding_Log3G10_v3,
    }
)
LOG3G10_DECODING_METHODS.__doc__ = """
Supported *Log3G10* log decoding curve / electro-optical transfer function
methods.
References
----------
:cite:`Nattress2016a`, :cite:`REDDigitalCinema2017`
"""
def log_decoding_Log3G10(
    y: FloatingOrArrayLike, method: Union[Literal["v1", "v2", "v3"], str] = "v3"
) -> FloatingOrNDArray:
    """
    Define the *Log3G10* log decoding curve / electro-optical transfer
    function.

    Parameters
    ----------
    y
        Non-linear data :math:`y`.
    method
        Computation method.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear data :math:`x`.

    Notes
    -----
    +------------+-----------------------+---------------+
    | **Domain** | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``y``      | [0, 1]                | [0, 1]        |
    +------------+-----------------------+---------------+

    +------------+-----------------------+---------------+
    | **Range**  | **Scale - Reference** | **Scale - 1** |
    +============+=======================+===============+
    | ``x``      | [0, 1]                | [0, 1]        |
    +------------+-----------------------+---------------+

    References
    ----------
    :cite:`Nattress2016a`, :cite:`REDDigitalCinema2017`

    Examples
    --------
    >>> log_decoding_Log3G10(1.0)  # doctest: +ELLIPSIS
    184.3223476...
    >>> log_decoding_Log3G10(1.0 / 3, method='v1')  # doctest: +ELLIPSIS
    0.1799994...
    """
    method = validate_method(method, LOG3G10_DECODING_METHODS)
    return LOG3G10_DECODING_METHODS[method](y)
def log_encoding_Log3G12(x: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G12* log encoding curve / opto-electronic transfer
    function.

    Parameters
    ----------
    x
        Linear data :math:`x`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Non-linear data :math:`y`.

    Notes
    -----
    Both ``x`` and ``y`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`Nattress2016a`, :cite:`REDDigitalCinema2017`

    Examples
    --------
    >>> log_encoding_Log3G12(0.18)  # doctest: +ELLIPSIS
    0.3333326...
    """

    x = to_domain_1(x)

    # Same log shaping as *Log3G10* but with 12-stop constants.
    y = np.sign(x) * 0.184904 * np.log10(np.abs(x) * 347.189667 + 1)

    return as_float(from_range_1(y))
def log_decoding_Log3G12(y: FloatingOrArrayLike) -> FloatingOrNDArray:
    """
    Define the *Log3G12* log decoding curve / electro-optical transfer
    function.

    Parameters
    ----------
    y
        Non-linear data :math:`y`.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        Linear data :math:`x`.

    Notes
    -----
    Both ``y`` and ``x`` are in domain / range [0, 1] under the *Reference*
    and *1* scales.

    References
    ----------
    :cite:`Nattress2016a`, :cite:`REDDigitalCinema2017`

    Examples
    --------
    >>> log_decoding_Log3G12(1.0 / 3)  # doctest: +ELLIPSIS
    0.1800015...
    """

    y = to_domain_1(y)

    # Invert the log shaping on the magnitude, then restore the sign.
    magnitude = (10.0 ** (np.abs(y) / 0.184904) - 1) / 347.189667
    x = np.sign(y) * magnitude

    return as_float(from_range_1(x))
| |
import numpy as np
from proteus import Domain, Context, Comm
from proteus.mprans import SpatialTools as st
import proteus.TwoPhaseFlow.TwoPhaseFlowProblem as TpFlow
import proteus.TwoPhaseFlow.utils.Parameters as Parameters
from proteus import WaveTools as wt
from proteus.Profiling import logEvent
from proteus.mbd import CouplingFSI as fsi
import os
import pychrono
# Physical parameters (SI units).
rho_0 = 998.2  # water density [kg/m^3]
nu_0 = 1.004e-6  # water kinematic viscosity [m^2/s]
rho_1 = 1.205  # air density [kg/m^3]
nu_1 = 1.5e-5  # air kinematic viscosity [m^2/s]
sigma_01 = 0.  # surface tension coefficient (disabled)
he = 0.05  # characteristic mesh element size [m]
tank_dim = [1., 1.]  # tank width x height [m]
water_level = 0.5  # initial free-surface elevation [m]
genMesh = False  # reuse the pre-generated mesh instead of calling gmsh
rhor = 0.5  # body-to-water density ratio
# ____ _
# | _ \ ___ _ __ ___ __ _(_)_ __
# | | | |/ _ \| '_ ` _ \ / _` | | '_ \
# | |_| | (_) | | | | | | (_| | | | | |
# |____/ \___/|_| |_| |_|\__,_|_|_| |_|
# Domain
# All geometrical options go here (but not mesh options)
domain = Domain.PlanarStraightLineGraphDomain()
# ----- SHAPES ----- #
# TANK
tank = st.Tank2D(domain, tank_dim)
# CAISSON
radius = 0.1  # half-width of the square caisson [m]
# Alternative circular caisson geometry, kept for reference:
#caisson = st.Circle(domain,
# radius=radius,
# coords=(tank_dim[0]/2., water_level+radius-2.*radius*rhor+radius/100.),
# barycenter=(tank_dim[0]/2., water_level+radius-2.*radius*rhor+radius/100.),
# nPoints=int(np.pi*radius/he))
# Square caisson centred horizontally, slightly above the free surface.
caisson = st.Rectangle(domain,
                       dim=[2*radius, 2*radius],
                       coords=(tank_dim[0]/2., water_level+radius/10.),
                       barycenter=(tank_dim[0]/2., water_level+radius/10.))
# The caisson interior is a hole in the fluid mesh.
caisson.setHoles([caisson.barycenter[:2]])
caisson.holes_ind = np.array([0])
# let gmsh know that the caisson is IN the tank
tank.setChildShape(caisson, 0)
# ____ _ ____ _ _ _ _
# | __ ) ___ _ _ _ __ __| | __ _ _ __ _ _ / ___|___ _ __ __| (_) |_(_) ___ _ __ ___
# | _ \ / _ \| | | | '_ \ / _` |/ _` | '__| | | | | / _ \| '_ \ / _` | | __| |/ _ \| '_ \/ __|
# | |_) | (_) | |_| | | | | (_| | (_| | | | |_| | |__| (_) | | | | (_| | | |_| | (_) | | | \__ \
# |____/ \___/ \__,_|_| |_|\__,_|\__,_|_| \__, |\____\___/|_| |_|\__,_|_|\__|_|\___/|_| |_|___/
# |___/
# Boundary Conditions
# Flow boundary conditions: open top, slip walls.
tank.BC['y+'].setAtmosphere()
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['x-'].setFreeSlip()
tank.BC['sponge'].setNonMaterial()
# Mesh-motion boundary conditions: pin the tank boundary nodes.
tank.BC['x-'].setFixedNodes()
tank.BC['x+'].setFixedNodes()
tank.BC['sponge'].setFixedNodes()
tank.BC['y+'].setFixedNodes()  # sliding mesh nodes
tank.BC['y-'].setFixedNodes()  # sliding mesh nodes
# No-slip on every caisson face.
for bc in caisson.BC_list:
    bc.setNoSlip()
# ___ _ _ _ _ ____ _ _ _ _
# |_ _|_ __ (_) |_(_) __ _| | / ___|___ _ __ __| (_) |_(_) ___ _ __ ___
# | || '_ \| | __| |/ _` | | | | / _ \| '_ \ / _` | | __| |/ _ \| '_ \/ __|
# | || | | | | |_| | (_| | | | |__| (_) | | | | (_| | | |_| | (_) | | | \__ \
# |___|_| |_|_|\__|_|\__,_|_| \____\___/|_| |_|\__,_|_|\__|_|\___/|_| |_|___/
# Initial Conditions
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
# Interface-smoothing length used by the smoothed Heaviside functions.
smoothing = 1.5 * he
nd = domain.nd  # number of space dimensions (2 here)
class P_IC:
    """Two-phase hydrostatic pressure initial condition.

    Integrates the smoothed density profile from the tank top (where the
    pressure is zero) down to the query point.
    """
    def uOfXT(self, x, t):
        p_top = 0.0
        phi_top = tank_dim[nd-1] - water_level
        phi = x[nd-1] - water_level
        # Hydrostatic pressure with a smoothed (Heaviside) density jump
        # across the free surface.
        return p_top - g[nd-1]*(rho_0*(phi_top - phi)
                                + (rho_1 - rho_0)
                                * (smoothedHeaviside_integral(smoothing, phi_top)
                                   - smoothedHeaviside_integral(smoothing, phi)))
class Zero_IC:
    """Identically-zero initial condition for generic scalar fields."""
    def uOfXT(self, x, t):
        return 0.0


class U_IC:
    """Zero initial x-velocity."""
    def uOfXT(self, x, t):
        return 0.0


class V_IC:
    """Zero initial y-velocity."""
    def uOfXT(self, x, t):
        return 0.0


class W_IC:
    """Zero initial z-velocity (unused in this 2D setup)."""
    def uOfXT(self, x, t):
        return 0.0
class VF_IC:
    """Initial volume fraction: smoothed 0/1 step across the free surface."""
    def uOfXT(self, x, t):
        signed_distance = x[nd-1] - water_level
        return smoothedHeaviside(smoothing, signed_distance)


class PHI_IC:
    """Initial level-set: signed distance to the free surface (positive in air)."""
    def uOfXT(self, x, t):
        return x[nd-1] - water_level
# ____ _
# / ___| |__ _ __ ___ _ __ ___
# | | | '_ \| '__/ _ \| '_ \ / _ \
# | |___| | | | | | (_) | | | | (_) |
# \____|_| |_|_| \___/|_| |_|\___/
# Chrono
# System
g = np.array([0., -9.81, 0.])  # gravity vector [m/s^2]
system = fsi.ProtChSystem()
system.ChSystem.Set_G_acc(pychrono.ChVectorD(g[0], g[1], g[2]))
system.setTimeStep(1e-5)  # Chrono sub-stepping time step [s]
#system.setCouplingScheme("CSS", prediction="backwardEuler")
# Body
# Rigid body coupled to the caisson shape.
body = fsi.ProtChBody(system=system)
body.attachShape(caisson)
#body.Aij_factor = 1/width
chbod = body.ChBody
x, y, z = caisson.barycenter
pos = pychrono.ChVectorD(x, y, z)
# Mass from the body/water density ratio and the square cross-section.
mass = (2.*radius)**2*rho_0*rhor
# NOTE(review): unit inertia placeholder — rotation is constrained below,
# so the actual value is not exercised.
inertia = pychrono.ChVectorD(1., 1., 1.)
chbod.SetPos(pos)
chbod.SetMass(mass)
chbod.SetInertiaXX(inertia)
#chbod.SetBodyFixed(True)
# Heave-only motion: free in y, no rotation.
body.setConstraints(free_x=np.array([0.,1.,0.]), free_r=np.array([0.,0.,0.]))
# body.setInitialRot(rotation_init)
# body.rotation_init=np.array([np.cos(ang/2.), 0., 0., np.sin(ang/2.)*1.])
body.setRecordValues(all_values=True)
# __ __ _ ___ _ _
# | \/ | ___ ___| |__ / _ \ _ __ | |_(_) ___ _ __ ___
# | |\/| |/ _ \/ __| '_ \ | | | | '_ \| __| |/ _ \| '_ \/ __|
# | | | | __/\__ \ | | | | |_| | |_) | |_| | (_) | | | \__ \
# |_| |_|\___||___/_| |_| \___/| .__/ \__|_|\___/|_| |_|___/
# |_|
domain.MeshOptions.use_gmsh = genMesh
domain.MeshOptions.genMesh = genMesh
# NOTE(review): `he = he` is a no-op self-assignment and could be removed.
he = he
domain.MeshOptions.he = he
# Pre-generated mesh files live next to this module.
modulepath = os.path.dirname(os.path.abspath(__file__))
mesh_fileprefix=modulepath+'/meshFloatingCylinder'
domain.MeshOptions.setOutputFiles(mesh_fileprefix)
st.assembleDomain(domain)
domain.use_gmsh = False
domain.geofile = mesh_fileprefix
# _ _ _
# | \ | |_ _ _ __ ___ ___ _ __(_) ___ ___
# | \| | | | | '_ ` _ \ / _ \ '__| |/ __/ __|
# | |\ | |_| | | | | | | __/ | | | (__\__ \
# |_| \_|\__,_|_| |_| |_|\___|_| |_|\___|___/
# Numerics
# Two-phase flow problem definition and time stepping.
myTpFlowProblem = TpFlow.TwoPhaseFlowProblem()
myTpFlowProblem.outputStepping.final_time = 0.1
myTpFlowProblem.outputStepping.dt_init = 0.01
myTpFlowProblem.outputStepping.dt_output = 0.1
myTpFlowProblem.outputStepping.dt_fixed = 0.01
myTpFlowProblem.outputStepping.archiveAllSteps = True
myTpFlowProblem.domain = domain
myTpFlowProblem.SystemNumerics.useSuperlu=False
myTpFlowProblem.SystemNumerics.cfl=0.9
# Model stack: moving mesh + default two-phase models + added mass.
myTpFlowProblem.SystemPhysics.setDefaults()
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelMoveMeshElastic,'move')
myTpFlowProblem.SystemPhysics.useDefaultModels()
myTpFlowProblem.SystemPhysics.addModel(Parameters.ParametersModelAddedMass,'addedMass')
myTpFlowProblem.SystemPhysics.movingDomain = True
# line below needed for relaxation zones
# (!) hack
m = myTpFlowProblem.SystemPhysics.modelDict
m['flow'].auxiliaryVariables += domain.auxiliaryVariables['twp']
params = myTpFlowProblem.SystemPhysics
#initialConditions
myTpFlowProblem.SystemPhysics.modelDict['move'].p.initialConditions['hx']=Zero_IC()
myTpFlowProblem.SystemPhysics.modelDict['move'].p.initialConditions['hy']=Zero_IC()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['p']=P_IC()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['u']=U_IC()
myTpFlowProblem.SystemPhysics.modelDict['flow'].p.initialConditions['v']=V_IC()
myTpFlowProblem.SystemPhysics.modelDict['vof'].p.initialConditions['vof']=VF_IC()
myTpFlowProblem.SystemPhysics.modelDict['ncls'].p.initialConditions['phi']=PHI_IC()
myTpFlowProblem.SystemPhysics.modelDict['rdls'].p.initialConditions['phid']=PHI_IC()
myTpFlowProblem.SystemPhysics.modelDict['mcorr'].p.initialConditions['phiCorr']=PHI_IC()
myTpFlowProblem.SystemPhysics.modelDict['addedMass'].p.initialConditions['addedMass']=Zero_IC()
# PHYSICAL PARAMETERS
params['rho_0'] = rho_0  # water
params['rho_1'] = rho_1  # air
params['nu_0'] = nu_0  # water
params['nu_1'] = nu_1  # air
params['gravity'] = np.array(g)
params['surf_tension_coeff'] = sigma_01
# Couple the Chrono system and its added-mass model to the flow solvers.
m['flow'].auxiliaryVariables += [system]
m['flow'].p.coefficients.eb_bc_penalty_constant = 10.#/nu_0#Re
m['addedMass'].auxiliaryVariables += [system.ProtChAddedMass]
# Mark which boundary flags belong to rigid bodies for the added-mass model.
# NOTE(review): the first assignment below is dead — it is immediately
# overwritten by the max() over vertexFlags.
max_flag = 0
max_flag = max(domain.vertexFlags)
max_flag = max(domain.segmentFlags+[max_flag])
max_flag = max(domain.facetFlags+[max_flag])
flags_rigidbody = np.zeros(max_flag+1, dtype='int32')
for s in system.subcomponents:
    if type(s) is fsi.ProtChBody:
        for i in s.boundaryFlags:
            flags_rigidbody[i] = 1
m['addedMass'].p.coefficients.flags_rigidbody = flags_rigidbody
| |
# -*- coding: utf-8 -*-
from collections import Iterable
from math import sqrt
import numpy as np
from pyfr.nputil import chop
from pyfr.util import lazyprop, subclass_where
def jacobi(n, a, b, z):
    """Evaluate the Jacobi polynomials P_0^(a,b) .. P_n^(a,b) at z.

    Uses the standard three-term recurrence; returns a list of n + 1
    values, lowest degree first.
    """
    vals = [1]

    if n >= 1:
        vals.append(((a + b + 2)*z + a - b) / 2)
    if n >= 2:
        ab, b2ma2 = a + b, b*b - a*a

        for m in range(2, n + 1):
            # Recurrence coefficients for degree m.
            mabm, ab2m = m*(ab + m), ab + 2*m
            ab2mm1, ab2mm2 = ab2m - 1, ab2m - 2

            am = ab2m*ab2mm1/(2*mabm)
            bm = ab2mm1*b2ma2/(2*mabm*ab2mm2)
            cm = ab2m*(a + m - 1)*(b + m - 1)/(mabm*ab2mm2)

            vals.append((am*z - bm)*vals[-1] - cm*vals[-2])

    return vals
def jacobi_diff(n, a, b, z):
    """Evaluate the derivatives of P_0^(a,b) .. P_n^(a,b) at z.

    Exploits d/dz P_k^(a,b) = (k + a + b + 1)/2 * P_{k-1}^(a+1,b+1); the
    derivative of the constant P_0 is zero.
    """
    derivs = [0]

    if n >= 1:
        shifted = jacobi(n - 1, a + 1, b + 1, z)
        derivs += [v*(k + a + b + 2)/2 for k, v in enumerate(shifted)]

    return derivs
def get_polybasis(name, order, pts=[]):
    """Instantiate the polynomial basis class registered under *name*.

    The mutable default for *pts* is benign here: it is only stored,
    never mutated.
    """
    basis_cls = subclass_where(BasePolyBasis, name=name)
    return basis_cls(order, pts)
class BasePolyBasis(object):
    """Base class for orthonormal polynomial bases over reference elements.

    Subclasses provide ``ortho_basis_at_py`` / ``jac_ortho_basis_at_py``
    (scalar point evaluation) and a ``name`` used for registry lookup.
    """

    name = None

    def __init__(self, order, pts):
        self.order = order
        self.pts = pts

    @chop
    def ortho_basis_at(self, pts):
        """Evaluate every basis polynomial at each point in *pts*.

        Returns an array of shape (nbasis, npts).
        """
        # `Iterable` lives in `collections.abc`; the `collections` alias
        # was removed in Python 3.10, so import it locally from its
        # canonical location.  NOTE(review): the module-level
        # `from collections import Iterable` import should be migrated to
        # `collections.abc` as well.
        from collections.abc import Iterable

        # Promote a flat sequence of scalars to 1-tuples so that the
        # per-point evaluators can be called uniformly via unpacking.
        if len(pts) and not isinstance(pts[0], Iterable):
            pts = [(p,) for p in pts]

        return np.array([self.ortho_basis_at_py(*p) for p in pts]).T

    @chop
    def jac_ortho_basis_at(self, pts):
        """Evaluate the gradient of every basis polynomial at *pts*.

        Returns an array with the derivative axis moved to the front.
        """
        from collections.abc import Iterable

        if len(pts) and not isinstance(pts[0], Iterable):
            pts = [(p,) for p in pts]

        J = [self.jac_ortho_basis_at_py(*p) for p in pts]

        return np.array(J).swapaxes(0, 2)

    @chop
    def nodal_basis_at(self, epts):
        """Evaluate the nodal (Lagrange) basis at the points *epts*."""
        return np.linalg.solve(self.vdm, self.ortho_basis_at(epts)).T

    @chop
    def jac_nodal_basis_at(self, epts):
        """Evaluate the nodal basis gradients at the points *epts*."""
        return np.linalg.solve(self.vdm, self.jac_ortho_basis_at(epts))

    @lazyprop
    def vdm(self):
        # Vandermonde matrix of the orthonormal basis at the nodal points.
        return self.ortho_basis_at(self.pts)
class LinePolyBasis(BasePolyBasis):
    """Orthonormal Legendre basis on the reference line segment."""

    name = 'line'

    def ortho_basis_at_py(self, p):
        """Evaluate all basis polynomials at the point p."""
        polys = jacobi(self.order - 1, 0, 0, p)
        return [sqrt(k + 0.5)*v for k, v in enumerate(polys)]

    def jac_ortho_basis_at_py(self, p):
        """Evaluate the derivatives of all basis polynomials at p."""
        dpolys = jacobi_diff(self.order - 1, 0, 0, p)
        return [sqrt(k + 0.5)*v for k, v in enumerate(dpolys)]

    @lazyprop
    def degrees(self):
        # Polynomial degree of each mode, in basis order.
        return list(range(self.order))
class TriPolyBasis(BasePolyBasis):
    """Orthonormal Jacobi-type basis on the reference triangle.

    Points are mapped to collapsed (a, b) coordinates so tensor products
    of Jacobi polynomials can be used.
    """

    name = 'tri'

    def ortho_basis_at_py(self, p, q):
        """Evaluate all basis polynomials at the point (p, q)."""
        # Collapsed-coordinate transform; the q == 1 vertex is singular
        # and is mapped to a = -1 explicitly.
        a = 2*(1 + p)/(1 - q) - 1 if q != 1 else -1
        b = q
        ob = []
        for i, pi in enumerate(jacobi(self.order - 1, 0, 0, a)):
            pa = pi*(1 - b)**i
            for j, pj in enumerate(jacobi(self.order - i - 1, 2*i + 1, 0, b)):
                # Normalisation constant of the (i, j) mode.
                cij = sqrt((2*i + 1)*(2*i + 2*j + 2)) / 2**(i + 1)
                ob.append(cij*pa*pj)
        return ob

    def jac_ortho_basis_at_py(self, p, q):
        """Evaluate the (d/dp, d/dq) gradients of all basis polynomials."""
        a = 2*(1 + p)/(1 - q) - 1 if q != 1 else -1
        b = q
        f = jacobi(self.order - 1, 0, 0, a)
        df = jacobi_diff(self.order - 1, 0, 0, a)
        ob = []
        for i, (fi, dfi) in enumerate(zip(f, df)):
            g = jacobi(self.order - i - 1, 2*i + 1, 0, b)
            dg = jacobi_diff(self.order - i - 1, 2*i + 1, 0, b)
            for j, (gj, dgj) in enumerate(zip(g, dg)):
                cij = sqrt((2*i + 1)*(2*i + 2*j + 2)) / 2**(i + 1)
                # Guard against a negative power at i = 0.
                tmp = (1 - b)**(i - 1) if i > 0 else 1
                pij = 2*tmp*dfi*gj
                qij = tmp*(-i*fi + (1 + a)*dfi)*gj + (1 - b)**i*fi*dgj
                ob.append([cij*pij, cij*qij])
        return ob

    @lazyprop
    def degrees(self):
        # Total polynomial degree of each mode, in basis order.
        return [i + j
                for i in range(self.order)
                for j in range(self.order - i)]
class QuadPolyBasis(BasePolyBasis):
    """Tensor-product orthonormal Legendre basis on the reference quad."""

    name = 'quad'

    def ortho_basis_at_py(self, p, q):
        """Evaluate all basis polynomials at (p, q)."""
        norms = [sqrt(k + 0.5) for k in range(self.order)]
        lp = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, p))]
        lq = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, q))]

        return [vi*vj for vi in lp for vj in lq]

    def jac_ortho_basis_at_py(self, p, q):
        """Evaluate the (d/dp, d/dq) gradients of all basis polynomials."""
        norms = [sqrt(k + 0.5) for k in range(self.order)]
        lp = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, p))]
        lq = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, q))]
        dlp = [c*v for c, v in zip(norms, jacobi_diff(self.order - 1, 0, 0, p))]
        dlq = [c*v for c, v in zip(norms, jacobi_diff(self.order - 1, 0, 0, q))]

        return [[dvi*vj, vi*dvj]
                for vi, dvi in zip(lp, dlp)
                for vj, dvj in zip(lq, dlq)]

    @lazyprop
    def degrees(self):
        # Total polynomial degree of each tensor-product mode.
        return [i + j for i in range(self.order) for j in range(self.order)]
class TetPolyBasis(BasePolyBasis):
    """Orthonormal Jacobi-type basis on the reference tetrahedron.

    Points are mapped to collapsed (a, b, c) coordinates so tensor
    products of Jacobi polynomials can be used.
    """

    name = 'tet'

    def ortho_basis_at_py(self, p, q, r):
        """Evaluate all basis polynomials at the point (p, q, r)."""
        # Collapsed-coordinate transform; the singular edges/vertex are
        # mapped to -1 explicitly.
        a = -2*(1 + p)/(q + r) - 1 if r != -q else -1
        b = 2*(1 + q)/(1 - r) - 1 if r != 1 else -1
        c = r
        ob = []
        for i, pi in enumerate(jacobi(self.order - 1, 0, 0, a)):
            ci = 2**(-2*i - 1)*sqrt(2*i + 1)*(1 - b)**i
            for j, pj in enumerate(jacobi(self.order - i - 1, 2*i + 1, 0, b)):
                cj = sqrt(i + j + 1)*2**-j*(1 - c)**(i + j)
                cij = ci*cj
                pij = pi*pj
                jp = jacobi(self.order - i - j - 1, 2*(i + j + 1), 0, c)
                for k, pk in enumerate(jp):
                    ck = sqrt(2*(k + j + i) + 3)
                    ob.append(cij*ck*pij*pk)
        return ob

    def jac_ortho_basis_at_py(self, p, q, r):
        """Evaluate the (d/dp, d/dq, d/dr) gradients of all basis polynomials."""
        a = -2*(1 + p)/(q + r) - 1 if r != -q else -1
        b = 2*(1 + q)/(1 - r) - 1 if r != 1 else -1
        c = r
        f = jacobi(self.order - 1, 0, 0, a)
        df = jacobi_diff(self.order - 1, 0, 0, a)
        ob = []
        for i, (fi, dfi) in enumerate(zip(f, df)):
            ci = 2**(-2*i - 1)*sqrt(2*i + 1)
            g = jacobi(self.order - i - 1, 2*i + 1, 0, b)
            dg = jacobi_diff(self.order - i - 1, 2*i + 1, 0, b)
            for j, (gj, dgj) in enumerate(zip(g, dg)):
                cj = sqrt(i + j + 1)*2**-j
                cij = ci*cj
                h = jacobi(self.order - i - j - 1, 2*(i + j + 1), 0, c)
                dh = jacobi_diff(self.order - i - j - 1, 2*(i + j + 1), 0, c)
                for k, (hk, dhk) in enumerate(zip(h, dh)):
                    ck = sqrt(2*(k + j + i) + 3)
                    cijk = cij*ck
                    # Guard against negative powers at i = 0 / i + j = 0.
                    tmp1 = (1 - c)**(i + j - 1) if i + j > 0 else 1
                    tmp2 = tmp1*(1 - b)**(i - 1) if i > 0 else 1
                    # Chain-rule terms of the collapsed-coordinate map.
                    pijk = 4*tmp2*dfi*gj*hk
                    qijk = 2*(tmp2*(-i*fi + (1 + a)*dfi)*gj
                              + tmp1*(1 - b)**i*fi*dgj)*hk
                    rijk = (
                        2*(1 + a)*tmp2*dfi*gj*hk
                        + (1 + b)*tmp1*(1 - b)**i*fi*dgj*hk
                        + (1 - c)**(i + j)*(1 - b)**i*fi*gj*dhk
                        - (i*(1 + b)*tmp2 + (i + j)*tmp1*(1 - b)**i)*fi*gj*hk
                    )
                    ob.append([cijk*pijk, cijk*qijk, cijk*rijk])
        return ob

    @lazyprop
    def degrees(self):
        # Total polynomial degree of each mode, in basis order.
        return [i + j + k
                for i in range(self.order)
                for j in range(self.order - i)
                for k in range(self.order - i - j)]
class PriPolyBasis(BasePolyBasis):
    """Orthonormal basis on the reference prism.

    A collapsed-coordinate triangle basis in (p, q) is tensored with a
    Legendre basis in r.
    """

    name = 'pri'

    def ortho_basis_at_py(self, p, q, r):
        """Evaluate all basis polynomials at the point (p, q, r)."""
        # Triangular collapsed-coordinate transform in the (p, q) plane.
        a = 2*(1 + p)/(1 - q) - 1 if q != 1 else -1
        b = q
        c = r
        pab = []
        for i, pi in enumerate(jacobi(self.order - 1, 0, 0, a)):
            ci = (1 - b)**i / 2**(i + 1)
            for j, pj in enumerate(jacobi(self.order - i - 1, 2*i + 1, 0, b)):
                cij = sqrt((2*i + 1)*(2*i + 2*j + 2))*ci
                pab.append(cij*pi*pj)
        # Legendre modes in the extrusion direction.
        sk = [sqrt(k + 0.5) for k in range(self.order)]
        pc = [s*jp for s, jp in zip(sk, jacobi(self.order - 1, 0, 0, c))]
        return [pij*pk for pij in pab for pk in pc]

    def jac_ortho_basis_at_py(self, p, q, r):
        """Evaluate the (d/dp, d/dq, d/dr) gradients of all basis polynomials."""
        a = 2*(1 + p)/(1 - q) - 1 if q != 1 else -1
        b = q
        c = r
        f = jacobi(self.order - 1, 0, 0, a)
        df = jacobi_diff(self.order - 1, 0, 0, a)
        pab = []
        for i, (fi, dfi) in enumerate(zip(f, df)):
            g = jacobi(self.order - i - 1, 2*i + 1, 0, b)
            dg = jacobi_diff(self.order - i - 1, 2*i + 1, 0, b)
            for j, (gj, dgj) in enumerate(zip(g, dg)):
                cij = sqrt((2*i + 1)*(2*i + 2*j + 2)) / 2**(i + 1)
                # Guard against a negative power at i = 0.
                tmp = (1 - b)**(i - 1) if i > 0 else 1
                pij = 2*tmp*dfi*gj
                qij = tmp*(-i*fi + (1 + a)*dfi)*gj + (1 - b)**i*fi*dgj
                rij = (1 - b)**i*fi*gj
                pab.append([cij*pij, cij*qij, cij*rij])
        sk = [sqrt(k + 0.5) for k in range(self.order)]
        hc = [s*jp for s, jp in zip(sk, jacobi(self.order - 1, 0, 0, c))]
        dhc = [s*jp for s, jp in zip(sk, jacobi_diff(self.order - 1, 0, 0, c))]
        return [[pij*hk, qij*hk, rij*dhk]
                for pij, qij, rij in pab for hk, dhk in zip(hc, dhc)]

    @lazyprop
    def degrees(self):
        # Total polynomial degree of each mode, in basis order.
        return [i + j + k
                for i in range(self.order)
                for j in range(self.order - i)
                for k in range(self.order)]
class PyrPolyBasis(BasePolyBasis):
    """Orthonormal Jacobi-type basis on the reference pyramid.

    Points are mapped to collapsed (a, b, c) coordinates; the apex
    (r == 1) is handled explicitly.
    """

    name = 'pyr'

    def ortho_basis_at_py(self, p, q, r):
        """Evaluate all basis polynomials at the point (p, q, r)."""
        # Collapsed-coordinate transform towards the apex.
        a = 2*p/(1 - r) if r != 1 else 0
        b = 2*q/(1 - r) if r != 1 else 0
        c = r
        sk = [2**(-k - 0.25)*sqrt(k + 0.5)
              for k in range(self.order)]
        pa = [s*jp for s, jp in zip(sk, jacobi(self.order - 1, 0, 0, a))]
        pb = [s*jp for s, jp in zip(sk, jacobi(self.order - 1, 0, 0, b))]
        ob = []
        for i, pi in enumerate(pa):
            for j, pj in enumerate(pb):
                cij = (1 - c)**(i + j)
                pij = pi*pj
                pc = jacobi(self.order - max(i, j) - 1, 2*(i + j + 1), 0, c)
                for k, pk in enumerate(pc):
                    ck = sqrt(2*(k + j + i) + 3)
                    ob.append(cij*ck*pij*pk)
        return ob

    def jac_ortho_basis_at_py(self, p, q, r):
        """Evaluate the (d/dp, d/dq, d/dr) gradients of all basis polynomials."""
        a = 2*p/(1 - r) if r != 1 else 0
        b = 2*q/(1 - r) if r != 1 else 0
        c = r
        sk = [2**(-k - 0.25)*sqrt(k + 0.5)
              for k in range(self.order)]
        fc = [s*jp for s, jp in zip(sk, jacobi(self.order - 1, 0, 0, a))]
        gc = [s*jp for s, jp in zip(sk, jacobi(self.order - 1, 0, 0, b))]
        dfc = [s*jp for s, jp in zip(sk, jacobi_diff(self.order - 1, 0, 0, a))]
        dgc = [s*jp for s, jp in zip(sk, jacobi_diff(self.order - 1, 0, 0, b))]
        ob = []
        for i, (fi, dfi) in enumerate(zip(fc, dfc)):
            for j, (gj, dgj) in enumerate(zip(gc, dgc)):
                h = jacobi(self.order - max(i, j) - 1, 2*(i + j + 1), 0, c)
                dh = jacobi_diff(
                    self.order - max(i, j) - 1, 2*(i + j + 1), 0, c
                )
                for k, (hk, dhk) in enumerate(zip(h, dh)):
                    ck = sqrt(2*(k + j + i) + 3)
                    # Guard against a negative power at i + j = 0.
                    tmp = (1 - c)**(i + j-1) if i + j > 0 else 1
                    # Chain-rule terms of the collapsed-coordinate map.
                    pijk = 2*tmp*dfi*gj*hk
                    qijk = 2*tmp*fi*dgj*hk
                    rijk = (tmp*(a*dfi*gj + b*fi*dgj - (i + j)*fi*gj)*hk
                            + (1 - c)**(i + j)*fi*gj*dhk)
                    ob.append([ck*pijk, ck*qijk, ck*rijk])
        return ob

    @lazyprop
    def degrees(self):
        # Total polynomial degree of each mode, in basis order.
        return [i + j + k
                for i in range(self.order)
                for j in range(self.order)
                for k in range(self.order - max(i, j))]
class HexPolyBasis(BasePolyBasis):
    """Tensor-product orthonormal Legendre basis on the reference hex."""

    name = 'hex'

    def ortho_basis_at_py(self, p, q, r):
        """Evaluate all basis polynomials at (p, q, r)."""
        norms = [sqrt(k + 0.5) for k in range(self.order)]
        lp = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, p))]
        lq = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, q))]
        lr = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, r))]

        return [vi*vj*vk for vi in lp for vj in lq for vk in lr]

    def jac_ortho_basis_at_py(self, p, q, r):
        """Evaluate the (d/dp, d/dq, d/dr) gradients of all basis polynomials."""
        norms = [sqrt(k + 0.5) for k in range(self.order)]
        lp = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, p))]
        lq = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, q))]
        lr = [c*v for c, v in zip(norms, jacobi(self.order - 1, 0, 0, r))]
        dlp = [c*v for c, v in zip(norms, jacobi_diff(self.order - 1, 0, 0, p))]
        dlq = [c*v for c, v in zip(norms, jacobi_diff(self.order - 1, 0, 0, q))]
        dlr = [c*v for c, v in zip(norms, jacobi_diff(self.order - 1, 0, 0, r))]

        return [[dvi*vj*vk, vi*dvj*vk, vi*vj*dvk]
                for vi, dvi in zip(lp, dlp)
                for vj, dvj in zip(lq, dlq)
                for vk, dvk in zip(lr, dlr)]

    @lazyprop
    def degrees(self):
        # Total polynomial degree of each tensor-product mode.
        return [i + j + k
                for i in range(self.order)
                for j in range(self.order)
                for k in range(self.order)]
| |
import unittest
import ctypes
import struct
import sys
from calpack import models
from calpack.utils import PYPY, FieldAlreadyExistsError, FieldNameDoesntExistError
class Test_BasicPacket(unittest.TestCase):
    """Unit tests covering the core behaviour of `models.Packet`:
    construction, defaults, (de)serialisation, equality and length."""

    def test_pkt_create_packet_object_with_defined_values(self):
        """
        This test verifies that a `Packet` object can be created with defined field values.
        """
        class two_int_field_packet(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        p = two_int_field_packet(int_field=12, int_field_signed=-12)

        self.assertEqual(p.int_field, 12)
        self.assertEqual(p.int_field_signed, -12)

    def test_pkt_create_packet_with_default_field_value(self):
        """
        This test verifies that a `Packet` definition can be created with default values for a
        particular field.
        """
        class two_int_field_packet(models.Packet):
            int_field = models.IntField(default_val=12)
            int_field_signed = models.IntField(signed=True, default_val=-12)

        p = two_int_field_packet()

        self.assertEqual(p.int_field, 12)
        self.assertEqual(p.int_field_signed, -12)

    def test_pkt_create_class_from_bytes_string(self):
        """
        This test verifies that a class can be created from a byte string and
        that the values are properly parsed.
        """
        class two_int_field_packet(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        # Reference structure built directly with ctypes so the expected
        # byte layout is known independently of calpack.
        class c_two_int_field_packet(ctypes.Structure):
            _fields_ = (
                ('int_field', ctypes.c_uint),
                ('int_field_signed', ctypes.c_int)
            )

        val_1 = 34
        val_2 = -12

        c_pkt = c_two_int_field_packet()
        c_pkt.int_field = val_1
        c_pkt.int_field_signed = val_2

        b_val = ctypes.string_at(ctypes.addressof(c_pkt), ctypes.sizeof(c_two_int_field_packet))

        pkt = two_int_field_packet.from_bytes(b_val)

        # Check both the public accessors and the internal ctypes struct.
        self.assertEqual(pkt.int_field, val_1)
        self.assertEqual(pkt._Packet__c_pkt.int_field, val_1)
        self.assertEqual(pkt.int_field_signed, val_2)
        self.assertEqual(pkt._Packet__c_pkt.int_field_signed, val_2)

    def test_pkt_compare_two_same_packets(self):
        """
        This test verifies that two packets of the same class and with the same field values when
        compared, will be equal.
        """
        class two_int_field_packet(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        pkt1 = two_int_field_packet(int_field=1, int_field_signed=-1)
        pkt2 = two_int_field_packet(int_field=1, int_field_signed=-1)

        self.assertEqual(pkt1, pkt2)

    def test_pkt_compare_two_different_packets(self):
        """
        This test verifies that two packets of different class types may have the same values and
        even the same bytecode output, will be non-equal when compared.
        """
        class two_int_field_packet(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        class two_int_field_packet_2(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        pkt_orig = two_int_field_packet(int_field=1, int_field_signed=-1)
        pkt_different_class_same_byte_out = two_int_field_packet_2(
            int_field=1,
            int_field_signed=-1
        )

        # even though these packets will generate the same byte output and
        # the fields are the same, they must compare as non-equal because
        # their classes are not the same.
        self.assertFalse(pkt_orig == pkt_different_class_same_byte_out)
        self.assertNotEqual(pkt_orig, pkt_different_class_same_byte_out)

        class three_int_field_packet(models.Packet):
            int_field = models.IntField()
            int_field_2 = models.IntField()
            int_field_3 = models.IntField()

        pkt_different_class_different_byte_out = three_int_field_packet(
            int_field=1,
            int_field_2=2,
            int_field_3=3
        )

        self.assertFalse(pkt_orig == pkt_different_class_different_byte_out)
        self.assertNotEqual(pkt_orig, pkt_different_class_different_byte_out)

        # Same class but different field values must also be non-equal.
        pkt_same_class_different_values = two_int_field_packet(
            int_field=1,
            int_field_signed=2
        )

        self.assertFalse(pkt_orig == pkt_same_class_different_values)
        self.assertNotEqual(pkt_orig, pkt_same_class_different_values)

    def test_pkt_export_to_bytes_string(self):
        """
        This test verifies that a `Packet` class can create a properly sized bytes string from the
        `Packet` field values.
        """
        class two_int_field_packet(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        # Reference ctypes structure provides the expected byte string.
        class c_two_int_field_packet(ctypes.Structure):
            _fields_ = (
                ('int_field', ctypes.c_uint),
                ('int_field_signed', ctypes.c_int),
            )

        val_1 = 54
        val_2 = -23

        c_pkt = c_two_int_field_packet()
        c_pkt.int_field = val_1
        c_pkt.int_field_signed = val_2

        b_val = ctypes.string_at(ctypes.addressof(c_pkt), ctypes.sizeof(c_two_int_field_packet))

        pkt = two_int_field_packet()
        pkt.int_field = val_1
        pkt.int_field_signed = val_2

        pkt_bin = pkt.to_bytes()

        self.assertEqual(pkt_bin, b_val)

    def test_pkt_to_bytes_string_then_import_from_binary(self):
        """
        This test verifies that a `Packet` class instance can be created from a properly structured
        byte string.
        """
        class two_int_field_packet(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        val_1 = 96
        val_2 = -3

        p = two_int_field_packet()
        p.int_field = val_1
        p.int_field_signed = val_2

        # Round-trip: serialise then parse back and compare field values.
        p2 = two_int_field_packet.from_bytes(p.to_bytes())

        self.assertEqual(p.int_field, p2.int_field)
        self.assertEqual(p.int_field_signed, p2.int_field_signed)

    def test_pkt_two_instances_different_field_instances(self):
        """
        This test verifies that two instances of the same `Packet` class can be created and does
        not affect the values of the other packet instance class.
        """
        class simple_pkt(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        p1 = simple_pkt()
        p2 = simple_pkt()

        p1.int_field = 1
        p2.int_field = 2
        p1.int_field_signed = -1
        p2.int_field_signed = -2

        self.assertFalse(p1 is p2)
        self.assertFalse(p1.int_field is p2.int_field)
        self.assertNotEqual(p1.int_field, p2.int_field)
        self.assertNotEqual(p1.int_field_signed, p2.int_field_signed)

    def test_pkt_len(self):
        """
        This test verifies that using the len function on a `Packet` instance will return the
        packet length.
        """
        class simple_pkt(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        # Equivalent ctypes structure provides the expected size in bytes.
        class c_simple_pkt(ctypes.Structure):
            _fields_ = (
                ('int_field', ctypes.c_uint),
                ('int_field_signed', ctypes.c_int)
            )

        p1 = simple_pkt()

        self.assertEqual(len(p1), ctypes.sizeof(c_simple_pkt))

    def test_pkt_invalid_fieldname_on_init_raises_error(self):
        """
        This test verifies that constructing a `Packet` with a keyword that
        does not name a defined field raises `FieldNameDoesntExistError`.
        """
        class simple_pkt(models.Packet):
            int_field = models.IntField()
            int_field_signed = models.IntField(signed=True)

        # 'flaot_field' is a deliberate misspelling: any unknown name
        # must be rejected, not silently ignored.
        with self.assertRaises(FieldNameDoesntExistError):
            pkt = simple_pkt(flaot_field=3.123)

        with self.assertRaises(FieldNameDoesntExistError):
            pkt = simple_pkt(int_field_invalid=123)
class Test_EndianPacket(unittest.TestCase):
    """Unit tests for the little- and big-endian `Packet` variants."""

    def _skip_if_pypy(self):
        """Skip the calling test on PyPy.

        As of 22 Feb '18, PyPy does not support non-native endianness.
        Previously these tests did ``return True`` under PyPy, which made
        them silently PASS; ``skipTest`` reports them as skipped instead.
        """
        if PYPY:
            self.skipTest("PyPy does not support non-native endianness")

    def test_endian_little_endian_packet_from_bytes(self):
        """
        This test verifies that a PacketLittleEndian packet can be created from a properly
        formatted little endian byte string.
        """
        self._skip_if_pypy()

        class little_packet(models.PacketLittleEndian):
            field1 = models.IntField()
            field2 = models.IntField()

        # '<' forces little-endian layout in the reference byte string.
        b_val = struct.pack("<II", 0xdeadbeef, 0xbeefcafe)

        pkt = little_packet.from_bytes(b_val)

        self.assertEqual(pkt.field1, 0xdeadbeef)
        self.assertEqual(pkt.field2, 0xbeefcafe)

    def test_endian_little_endian_packet_to_bytes(self):
        """
        This test verifies that a PacketLittleEndian packet can be created and will create the
        properly formatted byte string.
        """
        self._skip_if_pypy()

        class little_packet(models.PacketLittleEndian):
            field1 = models.IntField()
            field2 = models.IntField()

        expected_b_val = struct.pack("<II", 0xdeadbeef, 0xbeefcafe)

        pkt = little_packet(
            field1 = 0xdeadbeef,
            field2 = 0xbeefcafe
        )

        self.assertEqual(pkt.to_bytes(), expected_b_val)

    def test_endian_big_endian_packet_from_bytes(self):
        """
        This test verifies that a PacketBigEndian packet can be created from a properly
        formatted big endian byte string.
        """
        self._skip_if_pypy()

        class big_packet(models.PacketBigEndian):
            field1 = models.IntField()
            field2 = models.IntField()

        # '>' forces big-endian layout in the reference byte string.
        b_val = struct.pack(">II", 0xdeadbeef, 0xbeefcafe)

        pkt = big_packet.from_bytes(b_val)

        self.assertEqual(pkt.field1, 0xdeadbeef)
        self.assertEqual(pkt.field2, 0xbeefcafe)

    def test_endian_big_endian_packet_to_bytes(self):
        """
        This test verifies that a PacketBigEndian packet can be created and will create the
        properly formatted byte string.
        """
        self._skip_if_pypy()

        class big_packet(models.PacketBigEndian):
            field1 = models.IntField()
            field2 = models.IntField()

        expected_b_val = struct.pack(">II", 0xdeadbeef, 0xbeefcafe)

        pkt = big_packet(
            field1 = 0xdeadbeef,
            field2 = 0xbeefcafe
        )

        self.assertEqual(pkt.to_bytes(), expected_b_val)
# Allow this test module to be run directly (``python <this file>``)
# in addition to being collected by a test runner.
if __name__ == '__main__':
    unittest.main()
| |
"""DistributedObjectAI module: contains the DistributedObjectAI class"""
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedObjectBase import DistributedObjectBase
from direct.showbase import PythonUtil
from pandac.PandaModules import *
#from PyDatagram import PyDatagram
#from PyDatagramIterator import PyDatagramIterator
class DistributedObjectAI(DistributedObjectBase):
    """AI-side (server-side) representation of a distributed object.

    Manages the object's lifecycle on the AI repository (``self.air``):
    generation, deletion, zone/location changes, field updates sent to
    clients, and barrier synchronisation across a set of avatars.
    """
    notify = directNotify.newCategory("DistributedObjectAI")
    # Zone id of the special 'quiet' zone; logical zone-change events are
    # suppressed while passing through it (see setLocation).
    QuietZone = 1

    def __init__(self, air):
        # Guard so this initialisation runs only once, even when a
        # subclass inherits from DistributedObjectAI via multiple paths.
        try:
            self.DistributedObjectAI_initialized
        except:
            self.DistributedObjectAI_initialized = 1

            DistributedObjectBase.__init__(self, air)

            self.accountName=''
            # Record the repository
            self.air = air

            # Record our distributed class
            className = self.__class__.__name__
            self.dclass = self.air.dclassesByName[className]
            # init doId pre-allocated flag
            self.__preallocDoId = 0

            # used to track zone changes across the quiet zone
            # NOTE: the quiet zone is defined in OTP, but we need it
            # here.
            self.lastNonQuietZone = None

            self._DOAI_requestedDelete = False

            # These are used to implement beginBarrier().
            self.__nextBarrierContext = 0
            self.__barriers = {}

            self.__generated = False

            # reference count for multiple inheritance
            self.__generates = 0

            self._zoneData = None

    # Uncomment if you want to debug DO leaks
    #def __del__(self):
    #    """
    #    For debugging purposes, this just prints out what got deleted
    #    """
    #    print ("Destructing: " + self.__class__.__name__)

    if __debug__:
        def status(self, indent=0):
            """
            print out doId(parentId, zoneId) className
            and conditionally show generated, disabled, neverDisable,
            or cachable
            """
            spaces=' '*(indent+2)
            try:
                print "%s%s:"%(
                    ' '*indent, self.__class__.__name__)
                print "%sfrom DistributedObject doId:%s, parent:%s, zone:%s"%(
                    spaces,
                    self.doId, self.parentId, self.zoneId),
                flags=[]
                if self.__generated:
                    flags.append("generated")
                if self.air == None:
                    flags.append("deleted")
                if len(flags):
                    print "(%s)"%(" ".join(flags),),
                print
            except Exception, e: print "%serror printing status"%(spaces,), e

    def getDeleteEvent(self):
        # this is sent just before we get deleted
        if hasattr(self, 'doId'):
            return 'distObjDelete-%s' % self.doId
        return None

    def sendDeleteEvent(self):
        # this is called just before we get deleted
        delEvent = self.getDeleteEvent()
        if delEvent:
            messenger.send(delEvent)

    def getCacheable(self):
        """ This method exists only to mirror the similar method on
        DistributedObject.  AI objects aren't cacheable. """
        return False

    def deleteOrDelay(self):
        """ This method exists only to mirror the similar method on
        DistributedObject.  AI objects don't have delayDelete, they
        just get deleted immediately. """
        self.delete()

    def getDelayDeleteCount(self):
        # Mirrors DistributedObject; AI objects never have delayDeletes.
        return 0

    def delete(self):
        """
        Inheritors should redefine this to take appropriate action on delete
        Note that this may be called multiple times if a class inherits
        from DistributedObjectAI more than once.
        """
        # __generates is a reference count for multiple inheritance; the
        # real teardown happens only when it drops to exactly zero.
        self.__generates -= 1
        if self.__generates < 0:
            self.notify.debug('DistributedObjectAI: delete() called more times than generate()')
        if self.__generates == 0:
            # prevent this code from executing multiple times
            if self.air is not None:
                # self.doId may not exist.  The __dict__ syntax works around that.
                assert self.notify.debug('delete(): %s' % (self.__dict__.get("doId")))
                if not self._DOAI_requestedDelete:
                    # this logs every delete that was not requested by us.
                    # TODO: this currently prints warnings for deletes of objects
                    # that we did not create. We need to add a 'locally created'
                    # flag to every object to filter these out.
                    """
                    DistributedObjectAI.notify.warning(
                        'delete() called but requestDelete never called for %s: %s'
                        % (self.__dict__.get('doId'), self.__class__.__name__))
                    """
                    """
                    # print a stack trace so we can detect whether this is the
                    # result of a network msg.
                    # this is slow.
                    from direct.showbase.PythonUtil import StackTrace
                    DistributedObjectAI.notify.warning(
                        'stack trace: %s' % StackTrace())
                    """
                self._DOAI_requestedDelete = False

                self.releaseZoneData()

                # Clean up all the pending barriers.
                for barrier in self.__barriers.values():
                    barrier.cleanup()
                self.__barriers = {}

                # DCR: I've re-enabled this block of code so that Toontown's
                # AI won't leak channels.
                # Let me know if it causes trouble.
                ### Asad: As per Roger's suggestion, turn off the following
                ### block until a solution is thought out of how to prevent
                ### this delete message or to handle this message better
                # TODO: do we still need this check?
                if not getattr(self, "doNotDeallocateChannel", False):
                    if self.air:
                        self.air.deallocateChannel(self.doId)
                self.air = None
                self.parentId = None
                self.zoneId = None
                self.__generated = False

    def isDeleted(self):
        """
        Returns true if the object has been deleted,
        or if it is brand new and hasnt yet been generated.
        """
        return self.air == None

    def isGenerated(self):
        """
        Returns true if the object has been generated
        """
        return self.__generated

    def getDoId(self):
        """
        Return the distributed object id
        """
        return self.doId

    def preAllocateDoId(self):
        """
        objects that need to have a doId before they are generated
        can call this to pre-allocate a doId for the object
        """
        assert not self.__preallocDoId
        self.doId = self.air.allocateChannel()
        self.__preallocDoId = 1

    def announceGenerate(self):
        """
        Called after the object has been generated and all
        of its required fields filled in. Overwrite when needed.
        """
        pass

    def b_setLocation(self, parentId, zoneId):
        # 'b_' prefix: both distributed (d_) and local setter.
        self.d_setLocation(parentId, zoneId)
        self.setLocation(parentId, zoneId)

    def d_setLocation(self, parentId, zoneId):
        # Send the location change over the wire only.
        self.air.sendSetLocation(self, parentId, zoneId)

    def setLocation(self, parentId, zoneId):
        # Prevent Duplicate SetLocations for being Called
        if (self.parentId == parentId) and (self.zoneId == zoneId):
            return
        oldParentId = self.parentId
        oldZoneId = self.zoneId
        self.air.storeObjectLocation(self, parentId, zoneId)
        if ((oldParentId != parentId) or
            (oldZoneId != zoneId)):
            self.releaseZoneData()
            messenger.send(self.getZoneChangeEvent(), [zoneId, oldZoneId])
            # if we are not going into the quiet zone, send a 'logical' zone
            # change message
            if zoneId != DistributedObjectAI.QuietZone:
                lastLogicalZone = oldZoneId
                if oldZoneId == DistributedObjectAI.QuietZone:
                    lastLogicalZone = self.lastNonQuietZone
                self.handleLogicalZoneChange(zoneId, lastLogicalZone)
                self.lastNonQuietZone = zoneId

    def getLocation(self):
        # Returns (parentId, zoneId), or None when the object has no
        # meaningful location (unset, non-positive, or the -1 sentinel).
        try:
            if self.parentId <= 0 and self.zoneId <= 0:
                return None
            # This is a -1 stuffed into a uint32
            if self.parentId == 0xffffffff and self.zoneId == 0xffffffff:
                return None
            return (self.parentId, self.zoneId)
        except AttributeError:
            return None

    def postGenerateMessage(self):
        # Mark the object generated and broadcast the per-object
        # 'generate' event.
        self.__generated = True
        messenger.send(self.uniqueName("generate"), [self])

    def updateRequiredFields(self, dclass, di):
        # Apply broadcast-required fields from the datagram iterator,
        # then complete generation.
        dclass.receiveUpdateBroadcastRequired(self, di)
        self.announceGenerate()
        self.postGenerateMessage()

    def updateAllRequiredFields(self, dclass, di):
        # Apply all required fields, then complete generation.
        dclass.receiveUpdateAllRequired(self, di)
        self.announceGenerate()
        self.postGenerateMessage()

    def updateRequiredOtherFields(self, dclass, di):
        dclass.receiveUpdateBroadcastRequired(self, di)
        # Announce generate after updating all the required fields,
        # but before we update the non-required fields.
        self.announceGenerate()
        self.postGenerateMessage()
        dclass.receiveUpdateOther(self, di)

    def updateAllRequiredOtherFields(self, dclass, di):
        dclass.receiveUpdateAllRequired(self, di)
        # Announce generate after updating all the required fields,
        # but before we update the non-required fields.
        self.announceGenerate()
        self.postGenerateMessage()
        dclass.receiveUpdateOther(self, di)

    def startMessageBundle(self, name):
        # Delegates message bundling to the repository.
        self.air.startMessageBundle(name)

    def sendMessageBundle(self):
        self.air.sendMessageBundle(self.doId)

    def getZoneChangeEvent(self):
        # this event is generated whenever this object changes zones.
        # arguments are newZoneId, oldZoneId
        # includes the quiet zone.
        return DistributedObjectAI.staticGetZoneChangeEvent(self.doId)

    def getLogicalZoneChangeEvent(self):
        # this event is generated whenever this object changes to a
        # non-quiet-zone zone.
        # arguments are newZoneId, oldZoneId
        # does not include the quiet zone.
        return DistributedObjectAI.staticGetLogicalZoneChangeEvent(self.doId)

    @staticmethod
    def staticGetZoneChangeEvent(doId):
        return 'DOChangeZone-%s' % doId

    @staticmethod
    def staticGetLogicalZoneChangeEvent(doId):
        return 'DOLogicalChangeZone-%s' % doId

    def handleLogicalZoneChange(self, newZoneId, oldZoneId):
        """this function gets called as if we never go through the
        quiet zone. Note that it is called once you reach the newZone,
        and not at the time that you leave the oldZone."""
        messenger.send(self.getLogicalZoneChangeEvent(),
                       [newZoneId, oldZoneId])

    def getZoneData(self):
        # Call this to get an AIZoneData object for the current zone.
        # This class will hold onto it as self._zoneData
        # setLocation destroys self._zoneData if we move away to
        # a different zone
        if self._zoneData is None:
            from otp.ai.AIZoneData import AIZoneData
            self._zoneData = AIZoneData(self.air, self.parentId, self.zoneId)
        return self._zoneData

    def releaseZoneData(self):
        # You can call this to release any AIZoneData object that we might be
        # holding onto. If we're the last one for the current zone, the data
        # will be destroyed (render, collision traverser, etc.)
        # Note that the AIZoneData object that we're holding will be destroyed
        # automatically when we move away or are destroyed.
        if self._zoneData is not None:
            self._zoneData.destroy()
            self._zoneData = None

    def getRender(self):
        # note that this will return a different node if we change zones
        #return self.air.getRender(self.zoneId)
        return self.getZoneData().getRender()

    def getNonCollidableParent(self):
        return self.getZoneData().getNonCollidableParent()

    def getParentMgr(self):
        #return self.air.getParentMgr(self.zoneId)
        return self.getZoneData().getParentMgr()

    def getCollTrav(self, *args, **kArgs):
        return self.getZoneData().getCollTrav(*args, **kArgs)

    def sendUpdate(self, fieldName, args = []):
        # Broadcast a field update for this object via the repository.
        # NOTE(review): mutable default for 'args' — safe only because it
        # is forwarded, never mutated here.
        assert self.notify.debugStateCall(self)
        if self.air:
            self.air.sendUpdate(self, fieldName, args)

    def GetPuppetConnectionChannel(self, doId):
        # Channel arithmetic: high 32 bits select the channel class.
        return doId + (1001L << 32)

    def GetAccountConnectionChannel(self, doId):
        return doId + (1003L << 32)

    def GetAccountIDFromChannelCode(self, channel):
        return channel >> 32

    def GetAvatarIDFromChannelCode(self, channel):
        return channel & 0xffffffffL

    def sendUpdateToAvatarId(self, avId, fieldName, args):
        # Targeted update delivered on the avatar's puppet channel.
        assert self.notify.debugStateCall(self)
        channelId = self.GetPuppetConnectionChannel(avId)
        self.sendUpdateToChannel(channelId, fieldName, args)

    def sendUpdateToAccountId(self, accountId, fieldName, args):
        # Targeted update delivered on the account's connection channel.
        assert self.notify.debugStateCall(self)
        channelId = self.GetAccountConnectionChannel(accountId)
        self.sendUpdateToChannel(channelId, fieldName, args)

    def sendUpdateToChannel(self, channelId, fieldName, args):
        assert self.notify.debugStateCall(self)
        if self.air:
            self.air.sendUpdateToChannel(self, channelId, fieldName, args)

    def generateWithRequired(self, zoneId, optionalFields=[]):
        # Generate this object under the district, in the given zone.
        assert self.notify.debugStateCall(self)
        # have we already allocated a doId?
        if self.__preallocDoId:
            self.__preallocDoId = 0
            return self.generateWithRequiredAndId(
                self.doId, zoneId, optionalFields)

        # The repository is the one that really does the work
        parentId = self.air.districtId
        self.air.generateWithRequired(self, parentId, zoneId, optionalFields)
        self.generate()
        self.announceGenerate()
        self.postGenerateMessage()

    # this is a special generate used for estates, or anything else that
    # needs to have a hard coded doId as assigned by the server
    def generateWithRequiredAndId(self, doId, parentId, zoneId, optionalFields=[]):
        assert self.notify.debugStateCall(self)
        # have we already allocated a doId?
        if self.__preallocDoId:
            assert doId == self.doId
            self.__preallocDoId = 0

        # The repository is the one that really does the work
        self.air.generateWithRequiredAndId(self, doId, parentId, zoneId, optionalFields)
        self.generate()
        self.announceGenerate()
        self.postGenerateMessage()

    def generateOtpObject(self, parentId, zoneId, optionalFields=[], doId=None):
        assert self.notify.debugStateCall(self)
        # have we already allocated a doId?
        if self.__preallocDoId:
            assert doId is None or doId == self.doId
            doId=self.doId
            self.__preallocDoId = 0

        # Assign it an id
        if doId is None:
            self.doId = self.air.allocateChannel()
        else:
            self.doId = doId

        # Put the new DO in the dictionaries
        self.air.addDOToTables(self, location=(parentId, zoneId))

        # Send a generate message
        self.sendGenerateWithRequired(self.air, parentId, zoneId, optionalFields)

        self.generate()
        self.announceGenerate()
        self.postGenerateMessage()

    def generate(self):
        """
        Inheritors should put functions that require self.zoneId or
        other networked info in this function.
        """
        assert self.notify.debugStateCall(self)
        self.__generates += 1

    def generateInit(self, repository=None):
        """
        First generate (not from cache).
        """
        assert self.notify.debugStateCall(self)

    def generateTargetChannel(self, repository):
        """
        Who to send this to for generate messages
        """
        if hasattr(self, "dbObject"):
            return self.doId
        return repository.serverId

    def sendGenerateWithRequired(self, repository, parentId, zoneId, optionalFields=[]):
        # Format and send the generate datagram for this object.
        assert self.notify.debugStateCall(self)
        dg = self.dclass.aiFormatGenerate(
            self, self.doId, parentId, zoneId,
            #repository.serverId,
            self.generateTargetChannel(repository),
            repository.ourChannel,
            optionalFields)
        repository.send(dg)

    def initFromServerResponse(self, valDict):
        assert self.notify.debugStateCall(self)
        # This is a special method used for estates, etc., which get
        # their fields set from the database indirectly by way of the
        # AI.  The input parameter is a dictionary of field names to
        # datagrams that describes the initial field values from the
        # database.
        dclass = self.dclass
        for key, value in valDict.items():
            # Update the field
            dclass.directUpdate(self, key, value)

    def requestDelete(self):
        # Ask the repository to delete this object; delete() will follow.
        assert self.notify.debugStateCall(self)
        if not self.air:
            doId = "none"
            if hasattr(self, "doId"):
                doId = self.doId
            self.notify.warning(
                "Tried to delete a %s (doId %s) that is already deleted" %
                (self.__class__, doId))
            return
        self.air.requestDelete(self)
        self._DOAI_requestedDelete = True

    def taskName(self, taskString):
        # Task name unique to this object instance.
        return ("%s-%s" % (taskString, self.doId))

    def uniqueName(self, idString):
        # Event/name string unique to this object instance.
        return ("%s-%s" % (idString, self.doId))

    def validate(self, avId, bool, msg):
        # Log a suspicious-event server record when 'bool' is false;
        # returns 'bool' so it can be used inline in a condition.
        if not bool:
            self.air.writeServerEvent('suspicious', avId, msg)
            self.notify.warning('validate error: avId: %s -- %s' % (avId, msg))
        return bool

    def beginBarrier(self, name, avIds, timeout, callback):
        # Begins waiting for a set of avatars.  When all avatars in
        # the list have reported back in or the callback has expired,
        # calls the indicated callback with the list of avatars that
        # made it through.  There may be multiple barriers waiting
        # simultaneously on different lists of avatars, although they
        # should have different names.
        from otp.ai import Barrier
        context = self.__nextBarrierContext
        # We assume the context number is passed as a uint16.
        self.__nextBarrierContext = (self.__nextBarrierContext + 1) & 0xffff

        assert self.notify.debug('beginBarrier(%s, %s, %s, %s)' % (context, name, avIds, timeout))

        if avIds:
            barrier = Barrier.Barrier(
                name, self.uniqueName(name), avIds, timeout,
                doneFunc = PythonUtil.Functor(
                    self.__barrierCallback, context, callback))
            self.__barriers[context] = barrier

            # Send the context number to each involved client.
            self.sendUpdate("setBarrierData", [self.getBarrierData()])
        else:
            # No avatars; just call the callback immediately.
            callback(avIds)

        return context

    def getBarrierData(self):
        # Returns the barrier data formatted for sending to the
        # clients.  This lists all of the current outstanding barriers
        # and the avIds waiting for them.
        data = []
        for context, barrier in self.__barriers.items():
            avatars = barrier.pendingAvatars
            if avatars:
                data.append((context, barrier.name, avatars))
        return data

    def ignoreBarrier(self, context):
        # Aborts a previously-set barrier.  The context is the return
        # value from the previous call to beginBarrier().
        barrier = self.__barriers.get(context)
        if barrier:
            barrier.cleanup()
            del self.__barriers[context]

    def setBarrierReady(self, context):
        # Generated by the clients to check in after a beginBarrier()
        # call.
        avId = self.air.getAvatarIdFromSender()
        assert self.notify.debug('setBarrierReady(%s, %s)' % (context, avId))
        barrier = self.__barriers.get(context)
        if barrier == None:
            # This may be None if a client was slow and missed an
            # earlier timeout.  Too bad.
            return
        barrier.clear(avId)

    def __barrierCallback(self, context, callback, avIds):
        assert self.notify.debug('barrierCallback(%s, %s)' % (context, avIds))
        # The callback that is generated when a barrier is completed.
        barrier = self.__barriers.get(context)
        if barrier:
            barrier.cleanup()
            del self.__barriers[context]
            callback(avIds)
        else:
            self.notify.warning("Unexpected completion from barrier %s" % (context))

    def isGridParent(self):
        # If this distributed object is a DistributedGrid return 1.  0 by default
        return 0

    def execCommand(self, string, mwMgrId, avId, zoneId):
        # Hook for remote command execution; intentionally a no-op here.
        pass

    def _retrieveCachedData(self):
        """ This is a no-op on the AI. """
        pass
| |
# Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from tincan.serializable_base import SerializableBase
from tincan.agent import Agent
from tincan.group import Group
from tincan.verb import Verb
from tincan.context import Context
from tincan.attachment import Attachment
from tincan.attachment_list import AttachmentList
from tincan.conversions.iso8601 import make_datetime
"""
.. module:: StatementBase
:synopsis: The base object for both Statement and SubStatement
"""
class StatementBase(SerializableBase):
    """Common base for Statement and SubStatement.

    Provides the shared properties (actor, verb, object, timestamp,
    context, attachments) with setters that coerce plain values/dicts
    into the corresponding tincan types.
    """

    # Required properties; 'object' is declared here but its property is
    # provided elsewhere (presumably by subclasses/SerializableBase —
    # TODO confirm).
    _props_req = [
        'actor',
        'verb',
        'object',
        'timestamp',
    ]

    _props = [
        'context',
        'attachments'
    ]

    _props.extend(_props_req)

    def __init__(self, *args, **kwargs):
        # Initialise backing attributes before SerializableBase applies
        # any keyword arguments through the property setters.
        self._actor = None
        self._verb = None
        self._object = None
        self._timestamp = None
        self._context = None
        self._attachments = None

        super(StatementBase, self).__init__(*args, **kwargs)

    @property
    def actor(self):
        """Actor for StatementBase

        :setter: Tries to convert to :class:`tincan.Agent` or :class:`tincan.Group`
        :setter type: :class:`tincan.Agent` | :class:`tincan.Group`
        :rtype: :class:`tincan.Agent` | :class:`tincan.Group`

        """
        return self._actor

    @actor.setter
    def actor(self, value):
        if value is not None and not isinstance(value, Agent) and not isinstance(value, Group):
            if isinstance(value, dict):
                if 'object_type' in value or 'objectType' in value:
                    # Normalise the camelCase API key to snake_case.
                    if 'objectType' in value:
                        value['object_type'] = value['objectType']
                        value.pop('objectType')
                    if value['object_type'] == 'Agent':
                        value = Agent(value)
                    elif value['object_type'] == 'Group':
                        value = Group(value)
                    else:
                        # Unknown object_type: default to Agent.
                        value = Agent(value)
                else:
                    value = Agent(value)
        self._actor = value

    @actor.deleter
    def actor(self):
        del self._actor

    @property
    def verb(self):
        """Verb for StatementBase

        :setter: Tries to convert to :class:`tincan.Verb`
        :setter type: :class:`tincan.Verb`
        :rtype: :class:`tincan.Verb`

        """
        return self._verb

    @verb.setter
    def verb(self, value):
        if value is not None and not isinstance(value, Verb):
            value = Verb(value)
        self._verb = value

    @verb.deleter
    def verb(self):
        del self._verb

    @property
    def timestamp(self):
        """Timestamp for StatementBase

        :setter: Tries to convert to :class:`datetime.datetime`. If
        no timezone is given, makes a naive `datetime.datetime`.

        Strings will be parsed as ISO 8601 timestamps.

        If a number is provided, it will be interpreted as a UNIX
        timestamp, which by definition is UTC.

        If a `dict` is provided, does `datetime.datetime(**value)`.

        If a `tuple` or a `list` is provided, does
        `datetime.datetime(*value)`. Uses the timezone in the tuple or
        list if provided.

        :setter type: :class:`datetime.datetime` | unicode | str | int | float | dict | tuple | list | None
        :rtype: :class:`datetime.datetime`

        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, value):
        if value is None or isinstance(value, datetime):
            self._timestamp = value
            return

        try:
            self._timestamp = make_datetime(value)
        except TypeError as e:
            # Re-raise with a more descriptive message.  The message is
            # built from the exception itself rather than the Python 2-only
            # ``e.message`` attribute, which does not exist on Python 3 and
            # would mask the real error with an AttributeError.
            msg = (
                "Property 'timestamp' in a 'tincan.%s' "
                "object must be set with a "
                "datetime.datetime, str, unicode, int, float, dict "
                "or None.\n\n%s" %
                (
                    self.__class__.__name__,
                    e,
                )
            )
            raise TypeError(msg)

    @timestamp.deleter
    def timestamp(self):
        del self._timestamp

    @property
    def context(self):
        """Context for StatementBase

        :setter: Tries to convert to :class:`tincan.Context`
        :setter type: :class:`tincan.Context`
        :rtype: :class:`tincan.Context`

        """
        return self._context

    @context.setter
    def context(self, value):
        if value is not None and not isinstance(value, Context):
            value = Context(value)
        self._context = value

    @context.deleter
    def context(self):
        del self._context

    @property
    def attachments(self):
        """Attachments for StatementBase

        :setter: Tries to convert each element to :class:`tincan.Attachment`
        :setter type: :class:`tincan.AttachmentList`
        :rtype: :class:`tincan.AttachmentList`

        """
        return self._attachments

    @attachments.setter
    def attachments(self, value):
        if value is not None and not isinstance(value, AttachmentList):
            # First try treating 'value' as a single attachment; if that
            # fails, treat it as an iterable of attachments.
            try:
                value = AttachmentList([Attachment(value)])
            except (TypeError, AttributeError):
                value = AttachmentList(value)
        self._attachments = value

    @attachments.deleter
    def attachments(self):
        del self._attachments
| |
# Copyright 2021 Sean Robertson
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytests for `pydrobert.kaldi.io.table_streams`"""
import platform
import numpy as np
import pytest
from pydrobert.kaldi.io import open as io_open
from pydrobert.kaldi.io import table_streams
from pydrobert.kaldi.io.enums import KaldiDataType
@pytest.mark.parametrize(
    "dtype,value",
    [
        ("bv", []),
        ("bm", [[]]),
        ("bv", [np.infty]),
        ("bv", [1] * 100),
        ("bm", [[1, 2], [3, 4]]),
        ("fv", [-1, -1, 0, 0.1]),
        ("fm", np.random.random((10, 10)).astype(np.float32)),
        ("dv", np.arange(1000, dtype=np.float64) - 10),
        (
            "dm",
            np.outer(
                np.arange(100, dtype=np.float32), np.arange(111, dtype=np.float32)
            ),
        ),  # upcast ok
        ("t", "able"),
        # our methods can accept unicode, but always return strings,
        # so we don't enforce that these be unicode type.
        ("t", "\u00D6a"),
        ("t", "n\u00F9"),
        # lists can be written, but tuples are read
        ("tv", tuple()),
        ("tv", ("foo", "bar")),
        ("tv", ("skryyyyy",)),
        ("tv", ("\u00D6a", "n\u00F9")),
        ("i", -10),
        ("iv", (0, 1, 2)),
        ("iv", tuple()),
        ("ivv", ((100,), (10, 40))),
        ("ivv", tuple()),
        ("ipv", ((1, 2), (3, 4))),
        ("ipv", tuple()),
        ("d", 0.1),
        ("d", 1),
        ("b", -0.1),
        ("b", -10000),
        ("bpv", ((0, 1.3), (4.5, 6))),
        ("bpv", tuple()),
        ("B", True),
        ("B", False),
    ],
)
@pytest.mark.parametrize("is_text", [True, False])
@pytest.mark.parametrize("bg", [True, False])
def test_read_write(temp_file_1_name, dtype, value, is_text, bg):
    """Write one value to an archive and read the identical value back.

    Exercised over every Kaldi table data type, in both binary and text
    form ('t' option), and with both foreground and background ('bg')
    readers.
    """
    opts = ["", "t"] if is_text else [""]
    specifier = "ark" + ",".join(opts) + ":" + temp_file_1_name
    writer = io_open(specifier, dtype, mode="w")
    writer.write("a", value)
    writer.close()
    if bg:
        opts += ["bg"]
        specifier = "ark" + ",".join(opts) + ":" + temp_file_1_name
    reader = io_open(specifier, dtype)
    once = True
    for read_value in iter(reader):
        # The archive holds exactly one entry.
        assert once, "Multiple values"
        try:
            # Numeric types compare approximately; everything else exactly.
            if dtype.startswith("b") or dtype.startswith("f") or dtype.startswith("d"):
                assert np.allclose(read_value, value)
            else:
                assert read_value == value
        except TypeError:
            # np.allclose cannot handle e.g. empty/ragged input; fall back
            # to exact comparison.
            assert read_value == value
        once = False
    reader.close()
@pytest.mark.parametrize(
    "ktype,dtype,value",
    [
        ("b", np.float32, 3.14),  # upcast ok (if applicable)
        ("bpv", np.float32, ((0, 1.2), (3.4, 5), (6, 7.89))),  # upcast ok (if app)
        ("i", np.int32, 420),
        ("iv", np.int32, (1, 1, 2, 3, 5, 8, 13, 21)),
        ("ivv", np.int32, ((0, 1), (2, 3), (4, 5))),
        ("ipv", np.int32, ((0, 1), (2, 3), (4, 5))),
        ("t", str, "foo"),
        ("tv", str, ("foo", "bar")),
    ],
)
def test_write_read_numpy_versions(temp_file_1_name, ktype, dtype, value):
    """Writing a numpy-typed version of a value reads back as the original."""
    specifier = "ark:" + temp_file_1_name
    as_numpy = np.array(value).astype(dtype)
    with io_open(specifier, ktype, mode="w") as table_writer:
        table_writer.write("key", as_numpy)
    with io_open(specifier, ktype) as table_reader:
        read_back = next(table_reader)
    if ktype not in ("b", "bpv"):
        assert value == read_back
    else:
        # floating-point kaldi types compare approximately
        assert np.allclose(value, read_back)
def test_write_int32_correct_size(temp_file_1_name):
    """A single int32 entry serializes to exactly 9 bytes on disk."""
    with io_open("ark:" + temp_file_1_name, "i", mode="w") as int_writer:
        int_writer.write("9", 182)
    # expected layout:
    #   2 bytes for the key plus trailing space ('9 ')
    #   2 bytes for the binary marker '\0B'
    #   1 byte declaring the integer width in bytes (4)
    #   4 bytes for the int32 payload itself
    with open(temp_file_1_name, "rb") as raw:
        contents = raw.read()
    assert 9 == len(contents)
def test_cache(temp_file_1_name):
    """Random-access reads with cache=True populate a mutable cache_dict."""
    with io_open("ark:" + temp_file_1_name, "B", mode="w") as writer:
        writer.write("a", True)
        writer.write("b", False)
    with io_open("ark:" + temp_file_1_name, "B", mode="r+", cache=True) as rdr:
        # nothing is cached before the first lookup
        assert rdr.cache_dict == {}
        assert "a" not in rdr.cache_dict
        assert "a" in rdr
        assert rdr["a"]
        assert {"a": True} == rdr.cache_dict
        assert "a" in rdr.cache_dict
        assert "b" not in rdr.cache_dict
        assert "b" in rdr
        assert not rdr["b"]
        assert {"a": True, "b": False} == rdr.cache_dict
        # entries may be overwritten by hand; later reads hit the cache
        rdr.cache_dict["b"] = True
        assert rdr["b"]
def test_invalid_tv_does_not_segfault(temp_file_1_name):
    """Regression test: a raw string passed as a token vector must raise
    cleanly rather than crash the interpreter (weird bug I found)."""
    bad_token_vector = "foo bar"
    writer = io_open("ark:" + temp_file_1_name, "tv", mode="w")
    with pytest.raises(Exception):
        writer.write("foo", bad_token_vector)
    with pytest.raises(Exception):
        writer.write("foo", np.array(bad_token_vector))
@pytest.mark.parametrize(
    "ktype,value",
    [
        ("fv", (0, 1, 2, 3, 4, 5)),
        ("dv", (0, 1, 2, 3, 4, 5)),
        ("dm", ((0, 1, 2), (3, 4, 5))),
        ("fm", ((0, 1, 2), (3, 4, 5))),
        ("wm", np.random.randint(-255, 255, size=(3, 10)).astype(np.int16)),
        ("t", "hrnnngh"),
        ("tv", ("who", "am", "I")),
        ("i", -420),
        ("iv", (7, 8, 9)),
        ("ivv", ((0, 1), (2,))),
        ("ipv", ((-1, -10), (-5, 4))),
        ("d", 0.4),
        # the base floats can be cast to ints. It's important for the speed
        # of testing that certain floats are small/negative
        ("b", 1.401298464324817e-44),
        ("b", -1.401298464324817e-44),
        ("bpv", ((1.401298464324817e-44, 2.5), (3, 4.5))),
        ("bpv", ((-1.401298464324817e-44, 2.5), (3, 4.5))),
        ("B", True),
    ],
)
@pytest.mark.parametrize("is_text", [True, False])
@pytest.mark.parametrize("bg", [True, False])
def test_incorrect_open_read(
    temp_file_1_name, temp_file_2_name, ktype, value, is_text, bg
):
    """Robustness test: wrong dtypes and corrupted archives must not segfault.

    Writes one valid entry of *ktype* to two archives, then (1) re-reads the
    first archive under every *other* KaldiDataType, (2) appends random bytes
    to the end of the first archive, and (3) appends a key followed by random
    bytes to the second archive. All three may raise; none may crash.
    """
    if ktype == "wm" and is_text:
        pytest.skip("WaveMatrix can only be written as binary")
    opts = ["", "t"] if is_text else [""]
    specifier_1 = "ark" + ",".join(opts) + ":" + temp_file_1_name
    specifier_2 = "ark" + ",".join(opts) + ":" + temp_file_2_name
    with io_open(specifier_1, ktype, mode="w") as writer_1, io_open(
        specifier_2, ktype, mode="w"
    ) as writer_2:
        writer_1.write("0", value)
        writer_2.write("0", value)
    if bg:
        # re-open through the background-thread reader variant as well
        opts += ["bg"]
        specifier_1 = "ark" + ",".join(opts) + ":" + temp_file_1_name
        specifier_2 = "ark" + ",".join(opts) + ":" + temp_file_2_name
    for bad_ktype in KaldiDataType:
        try:
            with io_open(specifier_1, bad_ktype) as reader:
                next(reader)
        except Exception:
            # sometimes it'll work, and the expected output will be
            # correct (in the case of basic types). We don't care. All
            # we care about here is that we don't segfault
            pass
    # now we add some garbage data to the end of the file and try to
    # iterate through. Chances are this will end in failure (hopefully
    # not a segfault)
    with open(temp_file_1_name, mode="ab") as writer:
        writer.write(np.random.bytes(1000))
    try:
        with io_open(specifier_1, ktype) as reader:
            list(reader)
    except Exception:
        pass
    # do the same, but only corrupt *after* the key
    with open(temp_file_2_name, mode="ab") as writer:
        writer.write(b"1 " + np.random.bytes(1000))
    try:
        with io_open(specifier_2, ktype) as reader:
            list(reader)
    except Exception:
        pass
def test_invalid_scp(temp_file_1_name):
    """Garbage scp files (with or without a plausible key) must raise or
    yield nothing — never segfault."""
    for key_prefix in (b"", b"foo "):
        with open(temp_file_1_name, mode="wb") as writer:
            writer.write(key_prefix + np.random.bytes(1000))
        try:
            with io_open("scp:" + temp_file_1_name) as reader:
                next(reader)
        except Exception:
            # failure is fine; crashing is not
            pass
@pytest.mark.parametrize(
    "dtype,value",
    [
        ("bv", ["a", 2, 3]),
        ("bv", "abc"),
        ("bv", [[1, 2]]),
        ("fv", np.arange(3, dtype=np.float64)),  # downcast not ok
        ("bm", [["a", 2]]),
        ("bm", [0]),
        ("fm", np.random.random((10, 1)).astype(np.float64)),
        ("t", 1),
        ("t", []),
        ("t", "was I"),
        ("tv", ["a", 1]),
        ("tv", ("it's", "me DIO")),
        ("tv", "foobar"),
        ("tv", "foo bar"),
        ("i", "zimble"),
        ("iv", 1),
        ("ivv", [[[1]]]),
        ("ipv", ((1, 2), (3,))),
        ("d", 1 + 1j),
        ("b", "akljdal"),
        ("bpv", ((1,), (2, 3))),
    ],
)
@pytest.mark.parametrize("is_text", [True, False])
def test_write_invalid(temp_file_1_name, dtype, value, is_text):
    """Values incompatible with the declared table dtype must raise."""
    prefix = "ark,t:" if is_text else "ark:"
    table_writer = io_open(prefix + temp_file_1_name, dtype, mode="w")
    with pytest.raises(Exception):
        table_writer.write("a", value)
def test_read_sequential(temp_file_1_name):
    """Sequential reads return values in write order with their keys."""
    values = (
        [[1, 2] * 10] * 10,
        np.eye(1000, dtype=np.float32),
        [[]],
        np.outer(np.arange(1000, dtype=np.float32), np.arange(1000, dtype=np.float32)),
    )
    specifier = "ark:{}".format(temp_file_1_name)
    writer = io_open(specifier, "fm", mode="w")
    for key, value in enumerate(values):
        writer.write(str(key), value)
    writer.close()
    num_read = 0
    reader = io_open(specifier, "fm")
    for expected, actual in zip(values, iter(reader)):
        assert np.allclose(expected, actual)
        num_read += 1
    assert num_read == len(values)
    reader.close()
    # the keys must round-trip too
    reader = io_open(specifier, "fm")
    for idx, (key, _) in enumerate(reader.items()):
        assert str(idx) == key
def test_read_random(temp_file_1_name):
    """Random-access lookups work in any order when the 'o' (sorted) option
    is given on the reader specifier."""
    with io_open("ark:{}".format(temp_file_1_name), "dv", mode="w") as writer:
        for key, vec in (("able", []), ("was", [2]), ("I", [3, 3]), ("ere", [4, 4])):
            writer.write(key, vec)
    reader = io_open("ark,o:{}".format(temp_file_1_name), "dv", mode="r+")
    assert np.allclose(reader["I"], [3, 3])
    assert np.allclose(reader["able"], [])
    assert np.allclose(reader["was"], [2])
def test_write_script_and_archive(temp_file_1_name, temp_file_2_name):
    """Writing 'ark,scp' produces a script file usable for random access."""
    values = {
        "foo": np.ones((21, 32), dtype=np.float64),
        "bar": np.zeros((10, 1000), dtype=np.float64),
        "baz": -1e10 * np.eye(20, dtype=np.float64),
    }
    keys = list(values)
    with io_open(
        "ark,scp:{},{}".format(temp_file_1_name, temp_file_2_name), "dm", mode="w"
    ) as writer:
        # to make a missing entry, append it to the file's end with a subproc
        for key in keys:
            writer.write(key, values[key])
    keys.reverse()
    reader = io_open("scp:{}".format(temp_file_2_name), "dm", mode="r+")
    for key in keys:
        assert np.allclose(reader[key], values[key]), key
    assert np.allclose(reader["bar"], values["bar"]), "Failed doublecheck"
@pytest.mark.skipif(platform.system() == "Windows", reason="Not posix")
def test_read_write_pipe_posix(temp_file_1_name):
    """Table specifiers may shell out through pipes on posix systems."""
    expected = np.ones((1000, 10000), dtype=np.float32)
    with io_open("ark:| gzip -c > {}".format(temp_file_1_name), "fm", mode="w") as writer:
        writer.write("bar", expected)
    reader = io_open("ark:gunzip -c {}|".format(temp_file_1_name), "fm", mode="r+")
    assert np.allclose(reader["bar"], expected)
def test_context_open(temp_file_1_name):
    """io_open works as a context manager and yields the expected class for
    each mode: 'w' -> writer, 'r'/default -> sequential, 'r+' -> random."""
    specifier = "ark:{}".format(temp_file_1_name)
    with io_open(specifier, "bm", mode="w") as table:
        assert isinstance(table, table_streams.KaldiWriter)
        assert isinstance(table, table_streams.KaldiTable)
    with io_open(specifier, "bm") as table:
        assert isinstance(table, table_streams.KaldiSequentialReader)
    with io_open(specifier, "bm", mode="r") as table:
        assert isinstance(table, table_streams.KaldiSequentialReader)
    with io_open(specifier, "bm", mode="r+") as table:
        assert isinstance(table, table_streams.KaldiRandomAccessReader)
def test_filehandle_open(temp_file_1_name):
    """io_open also works as a plain call (no ``with``), returning a handle.

    The previous version leaked every handle it opened; an unclosed writer
    can then interfere with the readers opened on the same file (notably on
    platforms with mandatory file locking). Close each handle before opening
    the next.
    """
    specifier = "ark:{}".format(temp_file_1_name)
    kaldi_io = io_open(specifier, "bm", mode="w")
    assert isinstance(kaldi_io, table_streams.KaldiTable)
    assert isinstance(kaldi_io, table_streams.KaldiWriter)
    kaldi_io.close()
    kaldi_io = io_open(specifier, "bm")
    assert isinstance(kaldi_io, table_streams.KaldiSequentialReader)
    kaldi_io.close()
    kaldi_io = io_open(specifier, "bm", mode="r")
    assert isinstance(kaldi_io, table_streams.KaldiSequentialReader)
    kaldi_io.close()
    kaldi_io = io_open(specifier, "bm", mode="r+")
    assert isinstance(kaldi_io, table_streams.KaldiRandomAccessReader)
    kaldi_io.close()
def test_open_string_or_data_type(temp_file_1_name):
    """The dtype may be given as a short string ('bm') or a KaldiDataType.

    Each handle is now closed immediately (the previous version leaked all
    six, leaving the file held open across re-opens).
    """
    specifier = "ark:{}".format(temp_file_1_name)
    io_open(specifier, "bm", mode="w").close()
    io_open(specifier, table_streams.KaldiDataType.BaseMatrix, mode="w").close()
    io_open(specifier, "bm", mode="r").close()
    io_open(specifier, table_streams.KaldiDataType.BaseMatrix, mode="r").close()
    io_open(specifier, "bm", mode="r+").close()
    io_open(specifier, table_streams.KaldiDataType.BaseMatrix, mode="r+").close()
def test_invalid_data_type(temp_file_1_name):
    """An unrecognized dtype string is rejected with ValueError."""
    with pytest.raises(ValueError):
        io_open("ark:{}".format(temp_file_1_name), "foo", mode="w")
def test_no_exception_on_double_close(temp_file_1_name):
    """close() must be idempotent: a second call is a quiet no-op."""
    kaldi_io = io_open("ark:{}".format(temp_file_1_name), "bm", mode="w")
    for _ in range(2):
        kaldi_io.close()
def test_wave_read_write_valid(temp_file_1_name):
    """Wave tables round-trip pcm-16 buffers; with value_style='sbd' the
    reader yields (sample rate, buffer, duration) triples."""
    specifier = "ark:{}".format(temp_file_1_name)
    n_waves = 10
    keys = [str(i) for i in range(n_waves)]
    n_samples = [np.random.randint(1, 100000) for _ in keys]
    n_channels = [np.random.randint(1, 3) for _ in keys]
    # always written as pcm 16
    bufs = [
        (np.random.random((y, x)) * 30000 - 15000).astype(np.int16)
        for x, y in zip(n_samples, n_channels)
    ]
    writer = io_open(specifier, "wm", mode="w")
    for key, buf in zip(keys, bufs):
        writer.write(key, buf)
    writer.close()
    reader = io_open(specifier, "wm", value_style="sbd")
    remaining = n_waves
    for (sample_rate, actual_buf, duration), expected_buf in zip(reader, bufs):
        assert int(sample_rate) == 16000
        assert isinstance(duration, float)
        assert np.allclose(actual_buf, expected_buf)
        remaining -= 1
    assert not remaining, "Incorrect number of reads!"
| |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
import os
import sys
from os.path import abspath, dirname, join
from warnings import warn
from django.core.urlresolvers import reverse_lazy
# Flipped to True by graphite.app_settings when it is imported, so the code
# further down can tell whether local_settings already pulled it in.
GRAPHITE_WEB_APP_SETTINGS_LOADED = False
WEBAPP_VERSION = '0.10.0-alpha'
DEBUG = False
JAVASCRIPT_DEBUG = False
# Filesystem layout
WEB_DIR = dirname( abspath(__file__) )
WEBAPP_DIR = dirname(WEB_DIR)
GRAPHITE_ROOT = dirname(WEBAPP_DIR)
# Initialize additional path variables
# Defaults for these are set after local_settings is imported
STATIC_ROOT = ''
STATIC_URL = '/static/'
URL_PREFIX = ''
CONF_DIR = ''
DASHBOARD_CONF = ''
GRAPHTEMPLATES_CONF = ''
STORAGE_DIR = ''
WHITELIST_FILE = ''
INDEX_FILE = ''
LOG_DIR = ''
CERES_DIR = ''
WHISPER_DIR = ''
RRD_DIR = ''
STANDARD_DIRS = []
# Cluster settings
# (a duplicate ``CLUSTER_SERVERS = []`` assignment previously appeared just
# above this section as well; the redundant copy has been removed)
CLUSTER_SERVERS = []
REMOTE_FIND_TIMEOUT = 3.0
REMOTE_FETCH_TIMEOUT = 6.0
REMOTE_RETRY_DELAY = 60.0
REMOTE_READER_CACHE_SIZE_LIMIT = 1000
CARBON_METRIC_PREFIX='carbon'
CARBONLINK_HOSTS = ["127.0.0.1:7002"]
CARBONLINK_TIMEOUT = 1.0
CARBONLINK_HASHING_KEYFUNC = None
CARBONLINK_RETRY_DELAY = 15
REPLICATION_FACTOR = 1
MEMCACHE_HOSTS = []
MEMCACHE_KEY_PREFIX = ''
FIND_CACHE_DURATION = 300
FIND_TOLERANCE = 2 * FIND_CACHE_DURATION
DEFAULT_CACHE_DURATION = 60 #metric data and graphs are cached for one minute by default
LOG_CACHE_PERFORMANCE = False
LOG_ROTATE = True
MAX_FETCH_RETRIES = 2
#Remote rendering settings
REMOTE_RENDERING = False #if True, rendering is delegated to RENDERING_HOSTS
RENDERING_HOSTS = []
REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
LOG_RENDERING_PERFORMANCE = False
#Miscellaneous settings
SMTP_SERVER = "localhost"
DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
ALLOW_ANONYMOUS_CLI = True
LOG_METRIC_ACCESS = False
LEGEND_MAX_ITEMS = 10
RRD_CF = 'AVERAGE'
STORAGE_FINDERS = (
    'graphite.finders.standard.StandardFinder',
)
#Authentication settings
USE_LDAP_AUTH = False
LDAP_SERVER = "" # "ldapserver.mydomain.com"
LDAP_PORT = 389
LDAP_USE_TLS = False
LDAP_SEARCH_BASE = "" # "OU=users,DC=mydomain,DC=com"
LDAP_BASE_USER = "" # "CN=some_readonly_account,DC=mydomain,DC=com"
LDAP_BASE_PASS = "" # "my_password"
LDAP_USER_QUERY = "" # "(username=%s)" For Active Directory use "(sAMAccountName=%s)"
LDAP_URI = None
#Set this to True to delegate authentication to the web server
USE_REMOTE_USER_AUTHENTICATION = False
REMOTE_USER_BACKEND = "" # Provide an alternate or subclassed backend
# Django 1.5 requires this so we set a default but warn the user
SECRET_KEY = 'UNSAFE_DEFAULT'
# Django 1.5 requires this to be set. Here we default to prior behavior and allow all
ALLOWED_HOSTS = [ '*' ]
# Override to link a different URL for login (e.g. for django_openid_auth)
LOGIN_URL = reverse_lazy('account_login')
# Set to True to require authentication to save or delete dashboards
DASHBOARD_REQUIRE_AUTHENTICATION = False
# Require Django change/delete permissions to save or delete dashboards.
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_PERMISSIONS = False
# Name of a group to which the user must belong to save or delete dashboards. Alternative to
# DASHBOARD_REQUIRE_PERMISSIONS, particularly useful when using only LDAP (without Admin app)
# NOTE: Requires DASHBOARD_REQUIRE_AUTHENTICATION to be set
DASHBOARD_REQUIRE_EDIT_GROUP = None
DATABASES = None
# If using rrdcached, set to the address or socket of the daemon
FLUSHRRDCACHED = ''
## Load our local_settings
try:
    from graphite.local_settings import * # noqa
except ImportError:
    # NOTE: Python 2 print syntax -- this module predates Python 3 support
    print >> sys.stderr, "Could not import graphite.local_settings, using defaults!"
## Load Django settings if they werent picked up in local_settings
if not GRAPHITE_WEB_APP_SETTINGS_LOADED:
    from graphite.app_settings import * # noqa
STATICFILES_DIRS = (
    join(WEBAPP_DIR, 'content'),
)
## Set config dependent on flags set in local_settings
# Path configuration: fill in every path local_settings left empty, rooted
# at GRAPHITE_ROOT (environment variables may override CONF/STORAGE dirs)
if not STATIC_ROOT:
    STATIC_ROOT = join(GRAPHITE_ROOT, 'static')
if not CONF_DIR:
    CONF_DIR = os.environ.get('GRAPHITE_CONF_DIR', join(GRAPHITE_ROOT, 'conf'))
if not DASHBOARD_CONF:
    DASHBOARD_CONF = join(CONF_DIR, 'dashboard.conf')
if not GRAPHTEMPLATES_CONF:
    GRAPHTEMPLATES_CONF = join(CONF_DIR, 'graphTemplates.conf')
if not STORAGE_DIR:
    STORAGE_DIR = os.environ.get('GRAPHITE_STORAGE_DIR', join(GRAPHITE_ROOT, 'storage'))
if not WHITELIST_FILE:
    WHITELIST_FILE = join(STORAGE_DIR, 'lists', 'whitelist')
if not INDEX_FILE:
    INDEX_FILE = join(STORAGE_DIR, 'index')
if not LOG_DIR:
    LOG_DIR = join(STORAGE_DIR, 'log', 'webapp')
if not WHISPER_DIR:
    WHISPER_DIR = join(STORAGE_DIR, 'whisper/')
if not CERES_DIR:
    CERES_DIR = join(STORAGE_DIR, 'ceres/')
if not RRD_DIR:
    RRD_DIR = join(STORAGE_DIR, 'rrd/')
if not STANDARD_DIRS:
    # Only expose the data dirs whose backing module is importable
    try:
        import whisper # noqa
        if os.path.exists(WHISPER_DIR):
            STANDARD_DIRS.append(WHISPER_DIR)
    except ImportError:
        print >> sys.stderr, "WARNING: whisper module could not be loaded, whisper support disabled"
    try:
        import rrdtool # noqa
        if os.path.exists(RRD_DIR):
            STANDARD_DIRS.append(RRD_DIR)
    except ImportError:
        pass
if DATABASES is None:
    # Default database: on-disk sqlite under STORAGE_DIR
    DATABASES = {
        'default': {
            'NAME': join(STORAGE_DIR, 'graphite.db'),
            'ENGINE': 'django.db.backends.sqlite3',
            'USER': '',
            'PASSWORD': '',
            'HOST': '',
            'PORT': '',
        },
    }
# Handle URL prefix in static files handling
if URL_PREFIX and not STATIC_URL.startswith(URL_PREFIX):
    STATIC_URL = '/{0}{1}'.format(URL_PREFIX.strip('/'), STATIC_URL)
# Default sqlite db file
# This is set here so that a user-set STORAGE_DIR is available
if 'sqlite3' in DATABASES.get('default',{}).get('ENGINE','') \
    and not DATABASES.get('default',{}).get('NAME'):
    DATABASES['default']['NAME'] = join(STORAGE_DIR, 'graphite.db')
# Caching shortcuts
# NOTE(review): CACHES is assumed to be defined by app_settings or
# local_settings by this point -- confirm before enabling memcache
if MEMCACHE_HOSTS:
    CACHES['default'] = {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': MEMCACHE_HOSTS,
        'TIMEOUT': DEFAULT_CACHE_DURATION,
        'KEY_PREFIX': MEMCACHE_KEY_PREFIX,
    }
# Authentication shortcuts
if USE_LDAP_AUTH and LDAP_URI is None:
    LDAP_URI = "ldap://%s:%d/" % (LDAP_SERVER, LDAP_PORT)
if USE_REMOTE_USER_AUTHENTICATION or REMOTE_USER_BACKEND:
    MIDDLEWARE_CLASSES += ('django.contrib.auth.middleware.RemoteUserMiddleware',)
    if REMOTE_USER_BACKEND:
        AUTHENTICATION_BACKENDS.insert(0,REMOTE_USER_BACKEND)
    else:
        AUTHENTICATION_BACKENDS.insert(0,'django.contrib.auth.backends.RemoteUserBackend')
if USE_LDAP_AUTH:
    AUTHENTICATION_BACKENDS.insert(0,'graphite.account.ldapBackend.LDAPBackend')
if SECRET_KEY == 'UNSAFE_DEFAULT':
    warn('SECRET_KEY is set to an unsafe default. This should be set in local_settings.py for better security')
| |
'''
Stack Layout
============
.. only:: html
.. image:: images/stacklayout.gif
:align: right
.. only:: latex
.. image:: images/stacklayout.png
:align: right
.. versionadded:: 1.0.5
The :class:`StackLayout` arranges children vertically or horizontally, as many
as the layout can fit. The size of the individual children widgets do not
have to be uniform.
For example, to display widgets that get progressively larger in width::
root = StackLayout()
for i in range(25):
btn = Button(text=str(i), width=40 + i * 5, size_hint=(None, 0.15))
root.add_widget(btn)
.. image:: images/stacklayout_sizing.png
:align: left
'''
__all__ = ('StackLayout', )
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, OptionProperty, \
ReferenceListProperty, VariableListProperty
class StackLayout(Layout):
    '''Stack layout class. See module documentation for more information.
    '''
    spacing = VariableListProperty([0, 0], length=2)
    '''Spacing between children: [spacing_horizontal, spacing_vertical].
    spacing also accepts a single argument form [spacing].
    :attr:`spacing` is a
    :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0].
    '''
    padding = VariableListProperty([0, 0, 0, 0])
    '''Padding between the layout box and it's children: [padding_left,
    padding_top, padding_right, padding_bottom].
    padding also accepts a two argument form [padding_horizontal,
    padding_vertical] and a single argument form [padding].
    .. versionchanged:: 1.7.0
        Replaced the NumericProperty with a VariableListProperty.
    :attr:`padding` is a
    :class:`~kivy.properties.VariableListProperty` and defaults to
    [0, 0, 0, 0].
    '''
    orientation = OptionProperty('lr-tb', options=(
        'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt', 'bt-lr', 'rl-bt',
        'bt-rl'))
    '''Orientation of the layout.
    :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and
    defaults to 'lr-tb'.
    Valid orientations are 'lr-tb', 'tb-lr', 'rl-tb', 'tb-rl', 'lr-bt',
    'bt-lr', 'rl-bt' and 'bt-rl'.
    .. versionchanged:: 1.5.0
        :attr:`orientation` now correctly handles all valid combinations of
        'lr','rl','tb','bt'. Before this version only 'lr-tb' and
        'tb-lr' were supported, and 'tb-lr' was misnamed and placed
        widgets from bottom to top and from right to left (reversed compared
        to what was expected).
    .. note::
        'lr' means Left to Right.
        'rl' means Right to Left.
        'tb' means Top to Bottom.
        'bt' means Bottom to Top.
    '''
    minimum_width = NumericProperty(0)
    '''Minimum width needed to contain all children. It is automatically set
    by the layout.
    .. versionadded:: 1.0.8
    :attr:`minimum_width` is a :class:`kivy.properties.NumericProperty` and
    defaults to 0.
    '''
    minimum_height = NumericProperty(0)
    '''Minimum height needed to contain all children. It is automatically set
    by the layout.
    .. versionadded:: 1.0.8
    :attr:`minimum_height` is a :class:`kivy.properties.NumericProperty` and
    defaults to 0.
    '''
    minimum_size = ReferenceListProperty(minimum_width, minimum_height)
    '''Minimum size needed to contain all children. It is automatically set
    by the layout.
    .. versionadded:: 1.0.8
    :attr:`minimum_size` is a
    :class:`~kivy.properties.ReferenceListProperty` of
    (:attr:`minimum_width`, :attr:`minimum_height`) properties.
    '''
    def __init__(self, **kwargs):
        super(StackLayout, self).__init__(**kwargs)
        # any change to these properties invalidates the current layout
        self.bind(
            padding=self._trigger_layout,
            spacing=self._trigger_layout,
            children=self._trigger_layout,
            orientation=self._trigger_layout,
            size=self._trigger_layout,
            pos=self._trigger_layout)
    def do_layout(self, *largs):
        # Lay children out line by line along the first ("inner"/u) axis of
        # `orientation`, wrapping to a new line along the second ("outer"/v)
        # axis whenever the next child would not fit.
        # optimize layout by preventing looking at the same attribute in a loop
        selfpos = self.pos
        selfsize = self.size
        orientation = self.orientation.split('-')
        padding_left = self.padding[0]
        padding_top = self.padding[1]
        padding_right = self.padding[2]
        padding_bottom = self.padding[3]
        padding_x = padding_left + padding_right
        padding_y = padding_top + padding_bottom
        spacing_x, spacing_y = self.spacing
        # lc accumulates the children of the line currently being filled
        lc = []
        # Determine which direction and in what order to place the widgets
        # posattr[i]: 0 when axis i runs along x, 1 when along y;
        # posdelta[i]: +1/-1 direction sign; posstart[i]: start coordinate
        posattr = [0] * 2
        posdelta = [0] * 2
        posstart = [0] * 2
        for i in (0, 1):
            # 'tb'/'bt' tokens move along y (attribute index 1), others x
            posattr[i] = 1 * (orientation[i] in ('tb', 'bt'))
            k = posattr[i]
            if orientation[i] == 'lr':
                # left to right
                posdelta[i] = 1
                posstart[i] = selfpos[k] + padding_left
            elif orientation[i] == 'bt':
                # bottom to top
                posdelta[i] = 1
                posstart[i] = selfpos[k] + padding_bottom
            elif orientation[i] == 'rl':
                # right to left
                posdelta[i] = -1
                posstart[i] = selfpos[k] + selfsize[k] - padding_right
            else:
                # top to bottom
                posdelta[i] = -1
                posstart[i] = selfpos[k] + selfsize[k] - padding_top
        innerattr, outerattr = posattr
        ustart, vstart = posstart
        deltau, deltav = posdelta
        del posattr, posdelta, posstart
        u = ustart  # inner loop position variable
        v = vstart  # outer loop position variable
        # space calculation, used for determining when a row or column is full
        if orientation[0] in ('lr', 'rl'):
            lu = self.size[innerattr] - padding_x
            sv = padding_y  # size in v-direction, for minimum_size property
            su = padding_x  # size in h-direction
            spacing_u = spacing_x
            spacing_v = spacing_y
        else:
            lu = self.size[innerattr] - padding_y
            sv = padding_x  # size in v-direction, for minimum_size property
            su = padding_y  # size in h-direction
            spacing_u = spacing_y
            spacing_v = spacing_x
        # space calculation, row height or column width, for arranging widgets
        lv = 0
        # urev/vrev: True when the axis runs from high to low coordinates
        urev = (deltau < 0)
        vrev = (deltav < 0)
        for c in reversed(self.children):
            # size_hint scales the child against the padded layout area
            if c.size_hint[0]:
                c.width = c.size_hint[0] * (selfsize[0] - padding_x)
            if c.size_hint[1]:
                c.height = c.size_hint[1] * (selfsize[1] - padding_y)
            # does the widget fit in the row/column?
            if lu - c.size[innerattr] >= 0:
                lc.append(c)
                lu -= c.size[innerattr] + spacing_u
                lv = max(lv, c.size[outerattr])
                continue
            # push the line
            sv += lv + spacing_v
            for c2 in lc:
                if urev:
                    u -= c2.size[innerattr]
                c2.pos[innerattr] = u
                pos_outer = v
                if vrev:
                    # v position is actually the top/right side of the widget
                    # when going from high to low coordinate values,
                    # we need to subtract the height/width from the position.
                    pos_outer -= c2.size[outerattr]
                c2.pos[outerattr] = pos_outer
                if urev:
                    u -= spacing_u
                else:
                    u += c2.size[innerattr] + spacing_u
            # advance to the next line; it starts with the child that did
            # not fit on the line just pushed
            v += deltav * lv
            v += deltav * spacing_v
            lc = [c]
            lv = c.size[outerattr]
            lu = selfsize[innerattr] - su - c.size[innerattr] - spacing_u
            u = ustart
        if lc:
            # push the last (incomplete) line
            sv += lv + spacing_v
            for c2 in lc:
                if urev:
                    u -= c2.size[innerattr]
                c2.pos[innerattr] = u
                pos_outer = v
                if vrev:
                    pos_outer -= c2.size[outerattr]
                c2.pos[outerattr] = pos_outer
                if urev:
                    u -= spacing_u
                else:
                    u += c2.size[innerattr] + spacing_u
        # only the wrap-axis component of minimum_size is updated here
        self.minimum_size[outerattr] = sv
| |
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import contextlib
import mock
from neutron.api import extensions as api_ext
from neutron.common import config
from neutron.tests.unit.db import test_db_base_plugin_v2
from oslo_config import cfg
from oslo_utils import importutils
import six
import webob.exc
import networking_cisco
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.common import (cisco_constants as
c_constants)
from networking_cisco.plugins.cisco.db.device_manager import (
hosting_device_manager_db as hdm_db)
from networking_cisco.plugins.cisco.device_manager.rpc import (
devmgr_rpc_cfgagent_api)
from networking_cisco.plugins.cisco.device_manager import service_vm_lib
from networking_cisco.plugins.cisco.extensions import ciscohostingdevicemanager
from networking_cisco.tests.unit.cisco.device_manager import (
device_manager_test_support)
# Path to the policy file shipped in the repo's etc/ directory, resolved
# relative to the installed networking_cisco package.
policy_path = (os.path.abspath(networking_cisco.__path__[0]) +
               '/../etc/policy.json')
# Dotted path of the device manager plugin class under test.
DB_DM_PLUGIN_KLASS = (
    'networking_cisco.plugins.cisco.db.device_manager.'
    'hosting_device_manager_db.HostingDeviceManagerMixin')
# Network-node (namespace router) hosting device template constants.
NN_CATEGORY = ciscohostingdevicemanager.NETWORK_NODE_CATEGORY
NN_TEMPLATE_NAME = c_constants.NETWORK_NODE_TEMPLATE
NS_ROUTERTYPE_NAME = c_constants.NAMESPACE_ROUTER_TYPE
# Service-VM (CSR1kv) hosting device template constants.
VM_CATEGORY = ciscohostingdevicemanager.VM_CATEGORY
VM_TEMPLATE_NAME = "CSR1kv_template"
VM_BOOTING_TIME = 420
VM_SLOT_CAPACITY = 3
VM_DESIRED_SLOTS_FREE = 3
VM_ROUTERTYPE_NAME = c_constants.CSR1KV_ROUTER_TYPE
# Hardware hosting device template constants.
HW_CATEGORY = ciscohostingdevicemanager.HARDWARE_CATEGORY
HW_TEMPLATE_NAME = "HW_template"
HW_ROUTERTYPE_NAME = "HW_router"
L3_ROUTER_NAT = bc.constants.L3
DEFAULT_SERVICE_TYPES = "router"
NETWORK_NODE_SERVICE_TYPES = "router:fwaas:vpn"
# No-op driver classes for tests where driver behavior is irrelevant.
NOOP_DEVICE_DRIVER = ('networking_cisco.plugins.cisco.device_manager.'
                      'hosting_device_drivers.noop_hd_driver.'
                      'NoopHostingDeviceDriver')
NOOP_PLUGGING_DRIVER = ('networking_cisco.plugins.cisco.device_manager.'
                        'plugging_drivers.noop_plugging_driver.'
                        'NoopPluggingDriver')
TEST_DEVICE_DRIVER = NOOP_DEVICE_DRIVER
TEST_PLUGGING_DRIVER = ('networking_cisco.tests.unit.cisco.device_manager.'
                        'plugging_test_driver.TestPluggingDriver')
# Miscellaneous default attribute values used when building test requests.
DESCRIPTION = "default description"
SHARED = True
ACTION = "allow"
ENABLED = True
ADMIN_STATE_UP = True
UNBOUND = None
REQUESTER = True
OTHER = False
DEFAULT_CREDENTIALS_ID = device_manager_test_support._uuid()
class DeviceManagerTestCaseMixin(object):
def _create_hosting_device(self, fmt, template_id, management_port_id,
admin_state_up, expected_res_status=None,
**kwargs):
data = {'hosting_device': self._get_test_hosting_device_attr(
template_id=template_id, management_port_id=management_port_id,
admin_state_up=admin_state_up, **kwargs)}
hd_req = self.new_create_request('hosting_devices', data, fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
hd_req.environ['neutron.context'] = bc.context.Context(
'', kwargs['tenant_id'])
hd_res = hd_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, hd_res.status_int)
return hd_res
@contextlib.contextmanager
def hosting_device(self, template_id, management_port_id=None, fmt=None,
admin_state_up=True, no_delete=False,
set_port_device_id=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_hosting_device(fmt, template_id, management_port_id,
admin_state_up, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
hosting_device = self.deserialize(fmt or self.fmt, res)
if set_port_device_id is True and management_port_id is not None:
data = {'port': {
'device_id': hosting_device['hosting_device']['id'],
'device_owner': 'Nova'}}
req = self.new_update_request('ports', data, management_port_id)
res = self.deserialize(self.fmt, req.get_response(self.api))
yield hosting_device
if not no_delete:
self._delete('hosting_devices',
hosting_device['hosting_device']['id'])
def _create_hosting_device_template(self, fmt, name, enabled,
host_category,
expected_res_status=None, **kwargs):
data = {'hosting_device_template':
self._get_test_hosting_device_template_attr(
name=name, enabled=enabled, host_category=host_category,
**kwargs)}
hdt_req = self.new_create_request('hosting_device_templates', data,
fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
hdt_req.environ['neutron.context'] = bc.context.Context(
'', kwargs['tenant_id'])
hdt_res = hdt_req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, hdt_res.status_int)
return hdt_res
@contextlib.contextmanager
def hosting_device_template(self, fmt=None, name='device_template_1',
enabled=True, host_category=VM_CATEGORY,
no_delete=False, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_hosting_device_template(fmt, name, enabled,
host_category, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
hd_template = self.deserialize(fmt or self.fmt, res)
yield hd_template
if not no_delete:
self._delete('hosting_device_templates',
hd_template['hosting_device_template']['id'])
def _get_test_hosting_device_attr(self, template_id, management_port_id,
admin_state_up=True, **kwargs):
data = {
'tenant_id': kwargs.get('tenant_id', self._tenant_id),
'template_id': template_id,
'credentials_id': kwargs.get('credentials_id'),
'device_id': kwargs.get('device_id', 'mfc_device_id'),
'admin_state_up': admin_state_up,
'management_ip_address': kwargs.get('management_ip_address',
'10.0.100.10'),
'management_port_id': management_port_id,
'protocol_port': kwargs.get('protocol_port', 22),
'cfg_agent_id': kwargs.get('cfg_agent_id'),
'tenant_bound': kwargs.get('tenant_bound'),
'auto_delete': kwargs.get('auto_delete', False)}
return data
def _get_test_hosting_device_template_attr(self, name='device_template_1',
enabled=True,
host_category=VM_CATEGORY,
**kwargs):
data = {
'tenant_id': kwargs.get('tenant_id', self._tenant_id),
'name': name,
'enabled': enabled,
'host_category': host_category,
'service_types': kwargs.get('service_types',
DEFAULT_SERVICE_TYPES),
'image': kwargs.get('image'),
'flavor': kwargs.get('flavor'),
'default_credentials_id': kwargs.get('default_credentials_id',
DEFAULT_CREDENTIALS_ID),
'configuration_mechanism': kwargs.get('configuration_mechanism'),
'protocol_port': kwargs.get('protocol_port', 22),
'booting_time': kwargs.get('booting_time', 0),
'slot_capacity': kwargs.get('slot_capacity', 0),
'desired_slots_free': kwargs.get('desired_slots_free', 0),
'tenant_bound': kwargs.get('tenant_bound', []),
'device_driver': kwargs.get('device_driver', NOOP_DEVICE_DRIVER),
'plugging_driver': kwargs.get('plugging_driver',
NOOP_PLUGGING_DRIVER)}
return data
def _test_list_resources(self, resource, items,
neutron_context=None,
query_params=None):
if resource.endswith('y'):
resource_plural = resource.replace('y', 'ies')
else:
resource_plural = resource + 's'
res = self._list(resource_plural,
neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertEqual(sorted([i[resource]['id'] for i in items]),
sorted([i['id'] for i in res[resource_plural]]))
def _replace_hosting_device_status(self, attrs, old_status, new_status):
if attrs['status'] is old_status:
attrs['status'] = new_status
return attrs
    def _test_create_hosting_device_templates(self):
        """Create one template per host category (NN, VM, HW).

        Returns a dict keyed by category with each created template and
        the router type name expected to be associated with it.
        """
        # template for network nodes.
        nnt = self._create_hosting_device_template(self.fmt, NN_TEMPLATE_NAME,
                                                   True, NN_CATEGORY)
        nw_node_template = self.deserialize(self.fmt, nnt)
        # template for service VMs; only this one gets slot/booting tuning
        vmt = self._create_hosting_device_template(
            self.fmt, VM_TEMPLATE_NAME, True, VM_CATEGORY,
            booting_time=VM_BOOTING_TIME,
            slot_capacity=VM_SLOT_CAPACITY,
            desired_slots_free=VM_DESIRED_SLOTS_FREE,
            device_driver=TEST_DEVICE_DRIVER,
            plugging_driver=TEST_PLUGGING_DRIVER)
        vm_template = self.deserialize(self.fmt, vmt)
        # template for hardware devices
        hwt = self._create_hosting_device_template(
            self.fmt, HW_TEMPLATE_NAME, True, HW_CATEGORY)
        hw_template = self.deserialize(self.fmt, hwt)
        return {'network_node': {'template': nw_node_template,
                                 'router_type': NS_ROUTERTYPE_NAME},
                'vm': {'template': vm_template,
                       'router_type': VM_ROUTERTYPE_NAME},
                'hw': {'template': hw_template,
                       'router_type': HW_ROUTERTYPE_NAME}}
def _test_remove_hosting_device_templates(self):
for hdt in self._list('hosting_device_templates')[
'hosting_device_templates']:
self._delete('hosting_device_templates', hdt['id'])
class TestDeviceManagerDBPlugin(
    test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
    DeviceManagerTestCaseMixin,
    device_manager_test_support.DeviceManagerTestSupportMixin):
    """DB-level unit tests for the Cisco hosting device manager plugin."""
    # NOTE: mutates the mixin class itself (at class-definition time) so
    # that hosting-device resources are served under the /dev_mgr prefix.
    hdm_db.HostingDeviceManagerMixin.path_prefix = "/dev_mgr"
    # map every device-manager resource onto the same URI prefix
    resource_prefix_map = dict(
        (k, "/dev_mgr")
        for k in ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP.keys())
    def setUp(self, core_plugin=None, dm_plugin=None, ext_mgr=None):
        """Configure plugins, extension API and mocked service VM manager."""
        if dm_plugin is None:
            dm_plugin = DB_DM_PLUGIN_KLASS
        service_plugins = {'dm_plugin_name': dm_plugin}
        cfg.CONF.set_override('api_extensions_path',
                              device_manager_test_support.extensions_path)
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        hdm_db.HostingDeviceManagerMixin.supported_extension_aliases = (
            [ciscohostingdevicemanager.HOSTING_DEVICE_MANAGER_ALIAS])
        super(TestDeviceManagerDBPlugin, self).setUp(
            plugin=core_plugin, service_plugins=service_plugins,
            ext_mgr=ext_mgr)
        # Ensure we use policy definitions from our repo
        cfg.CONF.set_override('policy_file', policy_path, 'oslo_policy')
        if not ext_mgr:
            # no extension manager supplied: build one around the device
            # manager plugin and wire up the extension middleware
            self.plugin = importutils.import_object(dm_plugin)
            ext_mgr = api_ext.PluginAwareExtensionManager(
                device_manager_test_support.extensions_path,
                {c_constants.DEVICE_MANAGER: self.plugin})
            app = config.load_paste_app('extensions_test_app')
            self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self._mock_l3_admin_tenant()
        self._create_mgmt_nw_for_tests(self.fmt)
        self._devmgr = bc.get_plugin(c_constants.DEVICE_MANAGER)
        # in unit tests we don't use keystone so we mock that session
        self._devmgr._svc_vm_mgr_obj = service_vm_lib.ServiceVMManager(
            True, None, None, None, '', keystone_session=mock.MagicMock())
        self._mock_svc_vm_create_delete(self._devmgr)
        self._other_tenant_id = device_manager_test_support._uuid()
        self._devmgr._core_plugin = bc.get_plugin()
    def tearDown(self):
        """Remove all hosting devices and the test management network."""
        self._test_remove_all_hosting_devices()
        self._remove_mgmt_nw_for_tests()
        super(TestDeviceManagerDBPlugin, self).tearDown()
    def test_create_vm_hosting_device(self):
        """A VM-category hosting device is created with expected attrs."""
        with self.hosting_device_template() as hdt:
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                creds = device_manager_test_support._uuid()
                attrs = self._get_test_hosting_device_attr(
                    template_id=hdt['hosting_device_template']['id'],
                    management_port_id=mgmt_port['port']['id'],
                    auto_delete=True, credentials_id=creds)
                with self.hosting_device(
                        template_id=hdt['hosting_device_template']['id'],
                        management_port_id=mgmt_port['port']['id'],
                        auto_delete=True, credentials_id=creds) as hd:
                    for k, v in six.iteritems(attrs):
                        self.assertEqual(v, hd['hosting_device'][k])
    def test_create_hw_hosting_device(self):
        """A hardware-category hosting device is created with expected attrs."""
        with self.hosting_device_template(host_category=HW_CATEGORY) as hdt:
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                creds = device_manager_test_support._uuid()
                attrs = self._get_test_hosting_device_attr(
                    template_id=hdt['hosting_device_template']['id'],
                    management_port_id=mgmt_port['port']['id'],
                    credentials_id=creds)
                with self.hosting_device(
                        template_id=hdt['hosting_device_template']['id'],
                        management_port_id=mgmt_port['port']['id'],
                        credentials_id=creds) as hd:
                    for k, v in six.iteritems(attrs):
                        self.assertEqual(v, hd['hosting_device'][k])
    def test_show_hosting_device(self):
        """Showing a hosting device returns all attributes it was created with."""
        device_id = "device_XYZ"
        with self.hosting_device_template() as hdt:
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                creds = device_manager_test_support._uuid()
                attrs = self._get_test_hosting_device_attr(
                    device_id=device_id,
                    template_id=hdt['hosting_device_template']['id'],
                    management_port_id=mgmt_port['port']['id'],
                    credentials_id=creds)
                with self.hosting_device(
                        device_id=device_id,
                        template_id=hdt['hosting_device_template']['id'],
                        management_port_id=mgmt_port['port']['id'],
                        credentials_id=creds) as hd:
                    req = self.new_show_request(
                        'hosting_devices', hd['hosting_device']['id'],
                        fmt=self.fmt)
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.ext_api))
                    for k, v in six.iteritems(attrs):
                        self.assertEqual(v, res['hosting_device'][k])
    def test_list_hosting_devices(self):
        """Listing by template_id returns exactly the devices of that template."""
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port1,\
                    self.port(subnet=self._mgmt_subnet) as mgmt_port2,\
                    self.port(subnet=self._mgmt_subnet) as mgmt_port3:
                mp1_id = mgmt_port1['port']['id']
                mp2_id = mgmt_port2['port']['id']
                mp3_id = mgmt_port3['port']['id']
                with self.hosting_device(
                        name='hd1', template_id=hdt_id,
                        management_port_id=mp1_id) as hd1,\
                        self.hosting_device(
                            name='hd2', template_id=hdt_id,
                            management_port_id=mp2_id) as hd2,\
                        self.hosting_device(
                            name='hd3', template_id=hdt_id,
                            management_port_id=mp3_id) as hd3:
                    self._test_list_resources(
                        'hosting_device', [hd1, hd2, hd3],
                        query_params='template_id=' + hdt_id)
    def test_update_hosting_device(self):
        """Updating device_id via the API is reflected in the response."""
        new_device_id = "device_XYZ"
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                mgmt_port_id = mgmt_port['port']['id']
                creds = device_manager_test_support._uuid()
                attrs = self._get_test_hosting_device_attr(
                    device_id=new_device_id,
                    template_id=hdt['hosting_device_template']['id'],
                    management_port_id=mgmt_port['port']['id'],
                    credentials_id=creds)
                with self.hosting_device(
                        template_id=hdt_id,
                        management_port_id=mgmt_port_id,
                        credentials_id=creds) as hd:
                    data = {'hosting_device': {'device_id': new_device_id}}
                    req = self.new_update_request('hosting_devices', data,
                                                  hd['hosting_device']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.ext_api))
                    for k, v in six.iteritems(attrs):
                        self.assertEqual(v, res['hosting_device'][k])
    def test_delete_hosting_device_not_in_use_succeeds(self):
        """A hosting device with no slot users can be deleted (204)."""
        ctx = bc.context.get_admin_context()
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                mgmt_port_id = mgmt_port['port']['id']
                # no_delete=True: the test deletes the device itself below
                with self.hosting_device(template_id=hdt_id,
                                         management_port_id=mgmt_port_id,
                                         no_delete=True) as hd:
                    hd_id = hd['hosting_device']['id']
                    req = self.new_delete_request('hosting_devices', hd_id)
                    res = req.get_response(self.ext_api)
                    self.assertEqual(204, res.status_int)
                    self.assertRaises(
                        ciscohostingdevicemanager.HostingDeviceNotFound,
                        self.plugin.get_hosting_device, ctx, hd_id)
    def test_delete_hosting_device_in_use_fails(self):
        """Deleting a device with acquired slots raises HostingDeviceInUse."""
        ctx = bc.context.get_admin_context()
        with self.hosting_device_template(slot_capacity=1) as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                mgmt_port_id = mgmt_port['port']['id']
                with self.hosting_device(
                        template_id=hdt_id,
                        management_port_id=mgmt_port_id) as hd:
                    # suppress background pool maintenance during the test
                    with mock.patch.object(
                            hdm_db.HostingDeviceManagerMixin,
                            '_dispatch_pool_maintenance_job'):
                        hd_id = hd['hosting_device']['id']
                        hd_db = self._devmgr._get_hosting_device(ctx, hd_id)
                        resource = self._get_fake_resource()
                        self.assertTrue(
                            self._devmgr.acquire_hosting_device_slots(
                                ctx, hd_db, resource, 'router', L3_ROUTER_NAT,
                                1))
                        self.assertRaises(
                            ciscohostingdevicemanager.HostingDeviceInUse,
                            self._devmgr.delete_hosting_device, ctx, hd_id)
                        # device must still be retrievable after failed delete
                        req = self.new_show_request('hosting_devices', hd_id,
                                                    fmt=self.fmt)
                        res = req.get_response(self.ext_api)
                        self.assertEqual(200, res.status_int)
                        self._devmgr.release_hosting_device_slots(ctx, hd_db,
                                                                  resource, 1)
    def test_get_hosting_device_configuration(self):
        """Running config is fetched over RPC from the device's cfg agent."""
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                mgmt_port_id = mgmt_port['port']['id']
                with self.hosting_device(
                        template_id=hdt_id,
                        management_port_id=mgmt_port_id) as hd:
                    hd_id = hd['hosting_device']['id']
                    rpc = devmgr_rpc_cfgagent_api.DeviceMgrCfgAgentNotifyAPI(
                        self._devmgr)
                    self._devmgr.agent_notifiers = {
                        c_constants.AGENT_TYPE_CFG: rpc}
                    # placeholder; replaced by agt_mock via patch below
                    self._devmgr.get_cfg_agents_for_hosting_devices = None
                    with mock.patch.object(rpc.client, 'prepare',
                                           return_value=rpc.client) as (
                            mock_prepare),\
                            mock.patch.object(rpc.client, 'call') as mock_call,\
                            mock.patch.object(
                                self._devmgr,
                                'get_cfg_agents_for_hosting_devices') as agt_mock:
                        agt_mock.return_value = [mock.MagicMock()]
                        agent_host = 'an_agent_host'
                        agt_mock.return_value[0].host = agent_host
                        fake_running_config = 'a fake running config'
                        mock_call.return_value = fake_running_config
                        ctx = bc.context.Context(
                            user_id=None, tenant_id=None, is_admin=False,
                            overwrite=False)
                        res = self._devmgr.get_hosting_device_config(ctx,
                                                                     hd_id)
                        self.assertEqual(fake_running_config, res)
                        agt_mock.assert_called_once_with(
                            mock.ANY, [hd_id], admin_state_up=True,
                            schedule=True)
                        # the RPC must target the agent's host
                        mock_prepare.assert_called_with(server=agent_host)
                        mock_call.assert_called_with(
                            mock.ANY, 'get_hosting_device_configuration',
                            payload={'hosting_device_id': hd_id})
    def test_get_hosting_device_configuration_no_agent_found(self):
        """Config fetch returns None and makes no RPC when no agent exists."""
        ctx = bc.context.Context(user_id=None, tenant_id=None, is_admin=False,
                                 overwrite=False)
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                mgmt_port_id = mgmt_port['port']['id']
                with self.hosting_device(
                        template_id=hdt_id,
                        management_port_id=mgmt_port_id) as hd:
                    hd_id = hd['hosting_device']['id']
                    rpc = devmgr_rpc_cfgagent_api.DeviceMgrCfgAgentNotifyAPI(
                        self._devmgr)
                    self._devmgr.agent_notifiers = {
                        c_constants.AGENT_TYPE_CFG: rpc}
                    # placeholder; replaced by agt_mock via patch below
                    self._devmgr.get_cfg_agents_for_hosting_devices = None
                    with mock.patch.object(rpc.client, 'prepare',
                                           return_value=rpc.client) as (
                            mock_prepare),\
                            mock.patch.object(rpc.client, 'call') as mock_call,\
                            mock.patch.object(
                                self._devmgr,
                                'get_cfg_agents_for_hosting_devices') as agt_mock:
                        agt_mock.return_value = []
                        res = self._devmgr.get_hosting_device_config(ctx,
                                                                     hd_id)
                        self.assertIsNone(res)
                        agt_mock.assert_called_once_with(
                            mock.ANY, [hd_id], admin_state_up=True,
                            schedule=True)
                        # without an agent no RPC channel may be touched
                        self.assertEqual(0, mock_prepare.call_count)
                        self.assertEqual(0, mock_call.call_count)
    def test_hosting_device_policy(self):
        """Non-admin tenants are denied all hosting device operations."""
        device_id = "device_XYZ"
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            tenant_id = hdt['hosting_device_template']['tenant_id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                mgmt_port_id = mgmt_port['port']['id']
                creds = device_manager_test_support._uuid()
                with self.hosting_device(
                        device_id=device_id,
                        template_id=hdt_id,
                        management_port_id=mgmt_port_id,
                        credentials_id=creds) as hd:
                    hd_id = hd['hosting_device']['id']
                    # create fails
                    self._create_hosting_device(
                        self.fmt, hdt_id, mgmt_port_id, True,
                        webob.exc.HTTPForbidden.code,
                        tenant_id=tenant_id, set_context=True)
                    non_admin_ctx = bc.context.Context('', tenant_id)
                    # show fails
                    self._show('hosting_devices', hd_id,
                               webob.exc.HTTPNotFound.code, non_admin_ctx)
                    # update fails
                    self._update('hosting_devices', hd_id,
                                 {'hosting_device': {'name': 'new_name'}},
                                 webob.exc.HTTPForbidden.code, non_admin_ctx)
                    # delete fails
                    self._delete('hosting_devices', hd_id,
                                 webob.exc.HTTPNotFound.code, non_admin_ctx)
                    # get config fails
                    req = self.new_show_request(
                        'hosting_devices', hd_id, self.fmt,
                        'get_hosting_device_config')
                    req.environ['neutron.context'] = non_admin_ctx
                    res = req.get_response(self._api_for_resource(
                        'hosting_devices'))
                    self.assertEqual(webob.exc.HTTPNotFound.code,
                                     res.status_int)
    def test_create_vm_hosting_device_template(self):
        """Template with default (VM) host category has expected attrs."""
        attrs = self._get_test_hosting_device_template_attr()
        with self.hosting_device_template() as hdt:
            for k, v in six.iteritems(attrs):
                self.assertEqual(v, hdt['hosting_device_template'][k])
    def test_create_hw_hosting_device_template(self):
        """Template with hardware host category has expected attrs."""
        attrs = self._get_test_hosting_device_template_attr(
            host_category=HW_CATEGORY)
        with self.hosting_device_template(host_category=HW_CATEGORY) as hdt:
            for k, v in six.iteritems(attrs):
                self.assertEqual(v, hdt['hosting_device_template'][k])
    def test_create_nn_hosting_device_template(self):
        """Template with network-node host category has expected attrs."""
        attrs = self._get_test_hosting_device_template_attr(
            host_category=NN_CATEGORY)
        with self.hosting_device_template(host_category=NN_CATEGORY) as hdt:
            for k, v in six.iteritems(attrs):
                self.assertEqual(v, hdt['hosting_device_template'][k])
    def test_show_hosting_device_template(self):
        """Showing a template returns all attributes it was created with."""
        name = "hosting_device_template1"
        attrs = self._get_test_hosting_device_template_attr(name=name)
        with self.hosting_device_template(name=name) as hdt:
            req = self.new_show_request('hosting_device_templates',
                                        hdt['hosting_device_template']['id'],
                                        fmt=self.fmt)
            res = self.deserialize(self.fmt,
                                   req.get_response(self.ext_api))
            for k, v in six.iteritems(attrs):
                self.assertEqual(v, res['hosting_device_template'][k])
    def test_list_hosting_device_templates(self):
        """Listing by image filter returns all three created templates."""
        with self.hosting_device_template(name='hdt1',
                                          host_category=VM_CATEGORY,
                                          image='an_image') as hdt1,\
                self.hosting_device_template(name='hdt2',
                                             host_category=HW_CATEGORY,
                                             image='an_image') as hdt2,\
                self.hosting_device_template(name='hdt3',
                                             host_category=NN_CATEGORY,
                                             image='an_image') as hdt3:
            self._test_list_resources(
                'hosting_device_template', [hdt1, hdt2, hdt3],
                query_params='image=an_image')
    def test_update_hosting_device_template(self):
        """Updating a template's name is reflected in the response."""
        name = "new_hosting_device_template1"
        attrs = self._get_test_hosting_device_template_attr(name=name)
        with self.hosting_device_template() as hdt:
            data = {'hosting_device_template': {'name': name}}
            req = self.new_update_request('hosting_device_templates', data,
                                          hdt['hosting_device_template']['id'])
            res = self.deserialize(self.fmt,
                                   req.get_response(self.ext_api))
            for k, v in six.iteritems(attrs):
                self.assertEqual(v, res['hosting_device_template'][k])
    def test_delete_hosting_device_template_not_in_use_succeeds(self):
        """An unused template can be deleted (204) and is then gone."""
        ctx = bc.context.get_admin_context()
        # no_delete=True: the test deletes the template itself below
        with self.hosting_device_template(no_delete=True) as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            req = self.new_delete_request('hosting_device_templates', hdt_id)
            res = req.get_response(self.ext_api)
            self.assertEqual(204, res.status_int)
            self.assertRaises(
                ciscohostingdevicemanager.HostingDeviceTemplateNotFound,
                self._devmgr.get_hosting_device_template, ctx, hdt_id)
    def test_delete_hosting_device_template_in_use_fails(self):
        """Deleting a template with devices raises HostingDeviceTemplateInUse."""
        ctx = bc.context.get_admin_context()
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet) as mgmt_port:
                mgmt_port_id = mgmt_port['port']['id']
                with self.hosting_device(template_id=hdt_id,
                                         management_port_id=mgmt_port_id):
                    self.assertRaises(
                        ciscohostingdevicemanager.HostingDeviceTemplateInUse,
                        self._devmgr.delete_hosting_device_template, ctx,
                        hdt_id)
                    # template must still be retrievable after failed delete
                    req = self.new_show_request('hosting_device_templates',
                                                hdt_id, fmt=self.fmt)
                    res = req.get_response(self.ext_api)
                    self.assertEqual(200, res.status_int)
    def test_hosting_device_template_policy(self):
        """Non-admin tenants are denied all template operations."""
        with self.hosting_device_template() as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            tenant_id = hdt['hosting_device_template']['tenant_id']
            # create fails
            self._create_hosting_device_template(
                self.fmt, 'my_template', True, 'Hardware',
                webob.exc.HTTPForbidden.code,
                tenant_id=tenant_id, set_context=True)
            non_admin_ctx = bc.context.Context('', tenant_id)
            # show fail
            self._show('hosting_device_templates', hdt_id,
                       webob.exc.HTTPNotFound.code, non_admin_ctx)
            # update fail
            self._update('hosting_device_templates', hdt_id,
                         {'hosting_device_template': {'enabled': False}},
                         webob.exc.HTTPForbidden.code, non_admin_ctx)
            # delete fail
            self._delete('hosting_device_templates', hdt_id,
                         webob.exc.HTTPNotFound.code, non_admin_ctx)
# driver request test helper
def _test_get_driver(self, get_method, id=None, test_for_none=False,
is_admin=False):
with self.hosting_device_template() as hdt:
context = self._get_test_context(
tenant_id=hdt['hosting_device_template']['tenant_id'],
is_admin=is_admin)
driver_getter = getattr(self._devmgr, get_method)
template_id = id or hdt['hosting_device_template']['id']
driver = driver_getter(context, template_id)
if test_for_none:
self.assertIsNone(driver)
else:
self.assertIsNotNone(driver)
    # driver request tests
    def test_get_hosting_device_driver(self):
        """An existing template yields a hosting device driver."""
        self._test_get_driver('get_hosting_device_driver')
    def test_get_non_existent_hosting_device_driver_returns_none(self):
        """A bogus template id yields no hosting device driver."""
        self._test_get_driver('get_hosting_device_driver', 'bogus_id', True)
    def test_get_plugging_device_driver(self):
        """An existing template yields a plugging driver."""
        self._test_get_driver('get_hosting_device_plugging_driver')
    def test_get_non_existent_plugging_device_driver_returns_none(self):
        """A bogus template id yields no plugging driver."""
        self._test_get_driver('get_hosting_device_plugging_driver', 'bogus_id',
                              True)
    # get device info tests
    def test_get_device_info_for_agent(self):
        """Agent device info carries the mgmt port's fixed IP address."""
        device_id = "device_XYZ"
        with self.hosting_device_template() as hdt, self.port(
                subnet=self._mgmt_subnet) as mgmt_port:
            creds = device_manager_test_support._uuid()
            mgmt_ip = mgmt_port['port']['fixed_ips'][0]['ip_address']
            with self.hosting_device(
                    device_id=device_id,
                    template_id=hdt['hosting_device_template']['id'],
                    management_port_id=mgmt_port['port']['id'],
                    credentials_id=creds) as hd:
                context = self._get_test_context(
                    tenant_id=hdt['hosting_device_template']['tenant_id'],
                    is_admin=True)
                hd_id = hd['hosting_device']['id']
                hd_db = self._devmgr._get_hosting_device(context, hd_id)
                info = self._devmgr.get_device_info_for_agent(context, hd_db)
                self.assertEqual(mgmt_ip, info['management_ip_address'])
    def test_get_device_info_for_agent_no_mgmt_port(self):
        """Without a mgmt port the stored mgmt IP address is used instead."""
        device_id = "device_XYZ"
        with self.hosting_device_template() as hdt:
            creds = device_manager_test_support._uuid()
            mgmt_ip = '192.168.0.55'
            with self.hosting_device(
                    device_id=device_id,
                    template_id=hdt['hosting_device_template']['id'],
                    management_ip_address=mgmt_ip,
                    management_port_id=None,
                    credentials_id=creds) as hd:
                context = self._get_test_context(
                    tenant_id=hdt['hosting_device_template']['tenant_id'],
                    is_admin=True)
                hd_id = hd['hosting_device']['id']
                hd_db = self._devmgr._get_hosting_device(context, hd_id)
                info = self._devmgr.get_device_info_for_agent(context, hd_db)
                self.assertEqual(mgmt_ip, info['management_ip_address'])
def _set_ownership(self, bound_status, tenant_id, other_tenant_id=None):
if bound_status == UNBOUND:
return None
elif bound_status == OTHER:
return other_tenant_id or self._other_tenant_id
else:
return tenant_id
# slot allocation and release test helper:
# succeeds means returns True, fails means returns False
def _test_slots(self, expected_result=True, expected_bind=UNBOUND,
expected_allocation=VM_SLOT_CAPACITY,
num_requested=VM_SLOT_CAPACITY,
slot_capacity=VM_SLOT_CAPACITY, initial_bind=UNBOUND,
bind=False, auto_delete=True, is_admin=False,
pool_maintenance_expected=True, test_release=False,
expected_release_result=True, expected_final_allocation=0,
expected_release_bind=UNBOUND,
num_to_release=VM_SLOT_CAPACITY,
release_pool_maintenance_expected=True):
with self.hosting_device_template(
slot_capacity=slot_capacity) as hdt:
with self.port(subnet=self._mgmt_subnet) as mgmt_port:
resource = self._get_fake_resource()
tenant_bound = self._set_ownership(
initial_bind, resource['tenant_id'])
with self.hosting_device(
template_id=hdt['hosting_device_template']['id'],
management_port_id=mgmt_port['port']['id'],
tenant_bound=tenant_bound,
auto_delete=auto_delete) as hd:
context = self._get_test_context(
tenant_id=hdt['hosting_device_template']['tenant_id'],
is_admin=is_admin)
hd_db = self._devmgr._get_hosting_device(
context, hd['hosting_device']['id'])
with mock.patch.object(
hdm_db.HostingDeviceManagerMixin,
'_dispatch_pool_maintenance_job') as pm_mock:
result = self._devmgr.acquire_hosting_device_slots(
context, hd_db, resource, 'router', L3_ROUTER_NAT,
num_requested, bind)
allocation = self._devmgr.get_slot_allocation(
context, resource_id=resource['id'])
self.assertEqual(expected_result, result)
self.assertEqual(expected_allocation, allocation)
expected_bind = self._set_ownership(
expected_bind, resource['tenant_id'])
self.assertEqual(expected_bind, hd_db.tenant_bound)
if pool_maintenance_expected:
pm_mock.assert_called_once_with(mock.ANY)
num_calls = 1
else:
pm_mock.assert_not_called()
num_calls = 0
if test_release:
result = self._devmgr.release_hosting_device_slots(
context, hd_db, resource, num_to_release)
if not test_release:
return
allocation = self._devmgr.get_slot_allocation(
context, resource_id=resource['id'])
self.assertEqual(expected_release_result, result)
self.assertEqual(expected_final_allocation,
allocation)
expected_release_bind = self._set_ownership(
expected_release_bind, resource['tenant_id'])
self.assertEqual(expected_release_bind,
hd_db.tenant_bound)
if release_pool_maintenance_expected:
num_calls += 1
self.assertEqual(num_calls, pm_mock.call_count)
else:
# ensure we clean up everything
num_to_release = 0
to_clean_up = num_requested - num_to_release
if to_clean_up < 0:
to_clean_up = num_requested
if to_clean_up:
self._devmgr.release_hosting_device_slots(
context, hd_db, resource, to_clean_up)
    # slot allocation tests
    def test_acquire_with_slot_surplus_in_owned_hosting_device_succeeds(self):
        """Acquire succeeds on a device already bound to the requester."""
        self._test_slots(expected_bind=REQUESTER, initial_bind=REQUESTER,
                         bind=True)
    def test_acquire_with_slot_surplus_in_shared_hosting_device_succeeds(self):
        """Acquire succeeds on an unbound (shared) device."""
        self._test_slots()
    def test_acquire_with_slot_surplus_take_hosting_device_ownership_succeeds(
            self):
        """Acquire with bind=True takes ownership of an unbound device."""
        self._test_slots(expected_bind=REQUESTER, initial_bind=UNBOUND,
                         bind=True)
    def test_acquire_with_slot_surplus_drop_hosting_device_ownership_succeeds(
            self):
        """Acquire with bind=False drops the requester's ownership."""
        self._test_slots(expected_bind=UNBOUND, initial_bind=REQUESTER,
                         bind=False)
    def test_acquire_slots_release_hosting_device_ownership_affects_all(self):
        #TODO(bobmel): Implement this unit test
        pass
    def test_acquire_slots_in_other_owned_hosting_device_fails(self):
        """Acquire fails on a device bound to a different tenant."""
        self._test_slots(expected_result=False, expected_bind=OTHER,
                         expected_allocation=0, initial_bind=OTHER,
                         pool_maintenance_expected=False)
    def test_acquire_slots_take_ownership_of_other_owned_hosting_device_fails(
            self):
        """Binding a device owned by a different tenant fails."""
        self._test_slots(expected_result=False, expected_bind=OTHER,
                         expected_allocation=0, initial_bind=OTHER,
                         bind=True, pool_maintenance_expected=False)
    def test_acquire_slots_take_ownership_of_multi_tenant_hosting_device_fails(
            self):
        #TODO(bobmel): Implement this unit test
        pass
    def test_acquire_with_slot_deficit_in_owned_hosting_device_fails(self):
        """Requesting more slots than capacity fails on an owned device."""
        self._test_slots(expected_result=False, expected_bind=REQUESTER,
                         expected_allocation=0, initial_bind=REQUESTER,
                         num_requested=VM_SLOT_CAPACITY + 1)
    def test_acquire_with_slot_deficit_in_shared_hosting_device_fails(self):
        """Requesting more slots than capacity fails on a shared device."""
        self._test_slots(expected_result=False, expected_bind=UNBOUND,
                         expected_allocation=0,
                         num_requested=VM_SLOT_CAPACITY + 1)
    def test_acquire_with_slot_deficit_in_other_owned_hosting_device_fails(
            self):
        """Over-capacity request fails on a foreign-owned device."""
        self._test_slots(expected_result=False, expected_bind=OTHER,
                         expected_allocation=0, initial_bind=OTHER,
                         num_requested=VM_SLOT_CAPACITY + 1,
                         pool_maintenance_expected=False)
    # slot release tests
    def test_release_allocated_slots_in_owned_hosting_device_succeeds(self):
        """Partial release on an owned device keeps ownership and remainder."""
        self._test_slots(expected_bind=REQUESTER, initial_bind=REQUESTER,
                         bind=True, test_release=True,
                         expected_release_bind=REQUESTER,
                         expected_final_allocation=1,
                         num_to_release=VM_SLOT_CAPACITY - 1)
    def test_release_allocated_slots_in_shared_hosting_device_succeeds(self):
        """Partial release on a shared device leaves the remainder allocated."""
        self._test_slots(test_release=True, expected_final_allocation=1,
                         num_to_release=VM_SLOT_CAPACITY - 1)
    def test_release_all_slots_returns_hosting_device_ownership(self):
        """Releasing every slot unbinds the device from the requester."""
        self._test_slots(expected_bind=REQUESTER, initial_bind=REQUESTER,
                         bind=True, test_release=True,
                         expected_release_bind=UNBOUND)
    def test_release_slots_in_other_owned_hosting_device_fails(self):
        """Release fails on a device bound to a different tenant."""
        self._test_slots(expected_result=False, expected_bind=OTHER,
                         expected_allocation=0, initial_bind=OTHER,
                         pool_maintenance_expected=False,
                         test_release=True, expected_release_result=False,
                         expected_release_bind=OTHER,
                         expected_final_allocation=0,
                         num_to_release=VM_SLOT_CAPACITY - 1,
                         release_pool_maintenance_expected=False)
    def test_release_too_many_slots_in_owned_hosting_device_fails(self):
        """Releasing more slots than allocated fails on an owned device."""
        self._test_slots(expected_bind=REQUESTER, initial_bind=REQUESTER,
                         bind=True, test_release=True,
                         expected_release_result=False,
                         expected_release_bind=REQUESTER,
                         expected_final_allocation=VM_SLOT_CAPACITY,
                         num_to_release=VM_SLOT_CAPACITY + 1)
    def test_release_too_many_slots_in_shared_hosting_device_fails(self):
        """Releasing more slots than allocated fails on a shared device."""
        self._test_slots(test_release=True, expected_release_result=False,
                         expected_release_bind=UNBOUND,
                         expected_final_allocation=VM_SLOT_CAPACITY,
                         num_to_release=VM_SLOT_CAPACITY + 1)
    def test_release_too_many_slots_in_other_owned_hosting_device_fails(
            self):
        """Over-release fails on a foreign-owned device."""
        self._test_slots(expected_result=False, expected_bind=OTHER,
                         expected_allocation=0, initial_bind=OTHER,
                         pool_maintenance_expected=False,
                         test_release=True, expected_release_result=False,
                         expected_release_bind=OTHER,
                         expected_final_allocation=0,
                         num_to_release=VM_SLOT_CAPACITY + 1,
                         release_pool_maintenance_expected=False)
    def test_release_all_slots_by_negative_num_argument_shared_hosting_device(
            self):
        """A negative release count releases everything on a shared device."""
        self._test_slots(test_release=True, expected_final_allocation=0,
                         num_to_release=-1)
    def test_release_all_slots_by_negative_num_argument_owned_hosting_device(
            self):
        """A negative release count releases everything and unbinds."""
        self._test_slots(expected_bind=REQUESTER, initial_bind=REQUESTER,
                         bind=True, test_release=True, expected_release_bind=UNBOUND,
                         expected_final_allocation=0, num_to_release=-1)
    # hosting device deletion test helper
    def _test_delete(self, to_delete=None, auto_delete=None, no_delete=None,
                     force_delete=True, expected_num_remaining=0):
        """Create 5 devices over 2 templates, bulk delete, count survivors.

        :param to_delete: None deletes across all templates; 0/1 restricts
            deletion to the first/second template
        :param auto_delete: per-device auto_delete flags (5 entries)
        :param no_delete: per-device/port cleanup suppression flags
        :param force_delete: also delete devices not marked auto_delete
        :param expected_num_remaining: devices expected to survive
        """
        auto_delete = auto_delete or [True, False, False, True, True]
        no_delete = no_delete or [True, True, True, True, True]
        with self.hosting_device_template() as hdt1,\
                self.hosting_device_template() as hdt2:
            hdt0_id = hdt1['hosting_device_template']['id']
            hdt1_id = hdt2['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet,
                           no_delete=no_delete[0]) as mgmt_port0,\
                    self.port(subnet=self._mgmt_subnet,
                              no_delete=no_delete[1]) as mgmt_port1,\
                    self.port(subnet=self._mgmt_subnet,
                              no_delete=no_delete[2]) as mgmt_port2,\
                    self.port(subnet=self._mgmt_subnet,
                              no_delete=no_delete[3]) as mgmt_port3,\
                    self.port(subnet=self._mgmt_subnet,
                              no_delete=no_delete[4]) as mgmt_port4:
                mp0_id = mgmt_port0['port']['id']
                mp1_id = mgmt_port1['port']['id']
                mp2_id = mgmt_port2['port']['id']
                mp3_id = mgmt_port3['port']['id']
                mp4_id = mgmt_port4['port']['id']
                with self.hosting_device(
                        device_id='0_hdt0_id', template_id=hdt0_id,
                        management_port_id=mp0_id, auto_delete=auto_delete[0],
                        no_delete=no_delete[0]),\
                        self.hosting_device(
                            device_id='1_hdt1_id', template_id=hdt1_id,
                            management_port_id=mp1_id, auto_delete=auto_delete[1],
                            no_delete=no_delete[1]),\
                        self.hosting_device(
                            device_id='2_hdt0_id', template_id=hdt0_id,
                            management_port_id=mp2_id, auto_delete=auto_delete[2],
                            no_delete=no_delete[2]),\
                        self.hosting_device(
                            device_id='3_hdt0_id', template_id=hdt0_id,
                            management_port_id=mp3_id,
                            auto_delete=auto_delete[3],
                            no_delete=no_delete[3]),\
                        self.hosting_device(
                            device_id='4_hdt1_id', template_id=hdt1_id,
                            management_port_id=mp4_id, auto_delete=auto_delete[4],
                            no_delete=no_delete[4]):
                    context = self._get_test_context(is_admin=True)
                    if to_delete is None:
                        self._devmgr.delete_all_hosting_devices(
                            context, force_delete)
                    elif to_delete == 0:
                        template = (
                            self._devmgr._get_hosting_device_template(
                                context, hdt0_id))
                        (self._devmgr.
                         delete_all_hosting_devices_by_template(
                             context, template, force_delete))
                    else:
                        template = (
                            self._devmgr._get_hosting_device_template(
                                context, hdt1_id))
                        (self._devmgr.
                         delete_all_hosting_devices_by_template(
                             context, template, force_delete))
                    result_hds = self._list(
                        'hosting_devices')['hosting_devices']
                    self.assertEqual(expected_num_remaining,
                                     len(result_hds))
    # hosting device deletion tests
    def test_delete_all_hosting_devices(self):
        """Force delete removes every hosting device."""
        self._test_delete()
    def test_delete_all_managed_hosting_devices(self):
        """Non-force delete leaves non-auto_delete devices behind."""
        self._test_delete(no_delete=[True, False, False, True, True],
                          force_delete=False, expected_num_remaining=2)
    def test_delete_all_hosting_devices_by_template(self):
        """Force delete by template removes only that template's devices."""
        self._test_delete(to_delete=1, expected_num_remaining=3,
                          no_delete=[False, True, False, False, True])
    def test_delete_all_managed_hosting_devices_by_template(self):
        """Non-force delete by template spares non-auto_delete devices."""
        self._test_delete(to_delete=1, expected_num_remaining=4,
                          no_delete=[False, False, False, False, True],
                          force_delete=False)
    # handled failed hosting device test helper
    def _test_failed_hosting_device(self, host_category=VM_CATEGORY,
                                    expected_num_remaining=0,
                                    auto_delete=True, no_delete=True):
        """Report a device as non-responding and verify the plugin reaction.

        Checks how many devices remain, that the L3 side is told about the
        failure, and (when the device was removed) that the cfg agent is
        notified of the removal.
        """
        with self.hosting_device_template(host_category=host_category) as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            with self.port(subnet=self._mgmt_subnet,
                           no_delete=no_delete) as mgmt_port:
                with self.hosting_device(
                        template_id=hdt_id,
                        management_port_id=mgmt_port['port']['id'],
                        auto_delete=auto_delete, no_delete=no_delete) as hd:
                    with mock.patch('networking_cisco.backwards_compatibility.'
                                    'get_plugin'):
                        hd_id = hd['hosting_device']['id']
                        m2 = mock.MagicMock()
                        self._devmgr.agent_notifiers = {
                            c_constants.AGENT_TYPE_CFG: m2}
                        context = self._get_test_context()
                        self._devmgr.handle_non_responding_hosting_devices(
                            context, None, [hd_id])
                        result_hds = self._list('hosting_devices')[
                            'hosting_devices']
                        self.assertEqual(expected_num_remaining,
                                         len(result_hds))
                        l3mock = (bc.get_plugin().
                                  handle_non_responding_hosting_devices)
                        l3mock.assert_called_once_with(mock.ANY, mock.ANY,
                                                       {hd_id: {}})
                        if expected_num_remaining == 0:
                            m2.hosting_devices_removed.assert_called_once_with(
                                mock.ANY, {hd_id: {}}, False, None)
    # handled failed hosting device tests
    def test_failed_managed_vm_based_hosting_device_gets_deleted(self):
        """A failed auto_delete VM device is removed by the plugin."""
        self._test_failed_hosting_device()
    def test_failed_non_managed_vm_based_hosting_device_not_deleted(self):
        """A failed non-auto_delete VM device is kept."""
        self._test_failed_hosting_device(expected_num_remaining=1,
                                         auto_delete=False, no_delete=False)
    def test_failed_non_vm_based_hosting_device_not_deleted(self):
        """A failed hardware device is never auto-removed."""
        self._test_failed_hosting_device(host_category=HW_CATEGORY,
                                         expected_num_remaining=1,
                                         no_delete=False)
    # hosting device pool maintenance test helper
    def _test_pool_maintenance(self, desired_slots_free=10, slot_capacity=3,
                               host_category=VM_CATEGORY, expected=15,
                               define_credentials=True):
        """Run a pool maintenance pass and verify total slot count.

        Two devices exist up front; the maintenance job should grow or
        shrink the pool towards *desired_slots_free*.  *expected* is the
        resulting device count multiplied by *slot_capacity*.
        """
        with self.hosting_device_template(
                host_category=host_category, slot_capacity=slot_capacity,
                desired_slots_free=desired_slots_free,
                plugging_driver=TEST_PLUGGING_DRIVER) as hdt:
            hdt_id = hdt['hosting_device_template']['id']
            # an unknown credentials id simulates missing credentials
            creds_id = (DEFAULT_CREDENTIALS_ID if define_credentials is True
                        else 'non_existent_id')
            credentials = {'user_name': 'bob', 'password': 'tooEasy'}
            with mock.patch.dict(
                    self.plugin._credentials,
                    {creds_id: credentials}),\
                    self.port(subnet=self._mgmt_subnet,
                              no_delete=True) as mgmt_port1,\
                    self.port(subnet=self._mgmt_subnet,
                              no_delete=True) as mgmt_port2:
                with self.hosting_device(
                        template_id=hdt_id,
                        management_port_id=mgmt_port1['port']['id'],
                        auto_delete=True, no_delete=True),\
                        self.hosting_device(
                            template_id=hdt_id,
                            management_port_id=mgmt_port2['port']['id'],
                            auto_delete=True, no_delete=True):
                    context = self._get_test_context(is_admin=True)
                    template = self._devmgr._get_hosting_device_template(
                        context, hdt_id)
                    # run the maintenance synchronously instead of in a
                    # green thread so assertions can follow immediately
                    self._devmgr._gt_pool = mock.MagicMock()
                    self._devmgr._gt_pool.spawn_n.side_effect = (
                        lambda fcn, ctx, tmplt: fcn(ctx, tmplt))
                    self._devmgr._dispatch_pool_maintenance_job(
                        template)
                    result_hds = self._list(
                        'hosting_devices')['hosting_devices']
                    self.assertEqual(expected, len(result_hds) * slot_capacity)
                    self._devmgr.delete_all_hosting_devices(context, True)
# hosting device pool maintenance tests
def test_vm_based_hosting_device_excessive_slot_deficit_adds_slots(self):
    # Defaults: deficit of 10 with capacity 3 -> devices are spawned until
    # total capacity reaches 15.
    self._test_pool_maintenance()
def test_vm_based_hosting_device_excessive_slot_deficit_no_credentials(
        self):
    # no slots are added if credentials for template are missing
    self._test_pool_maintenance(expected=6, define_credentials=False)
def test_vm_based_hosting_device_marginal_slot_deficit_no_change(self):
    # A small deficit is tolerated; the two existing devices (2*3=6) remain.
    self._test_pool_maintenance(desired_slots_free=7, expected=6)
def test_vm_based_hosting_device_excessive_slot_surplus_removes_slots(
        self):
    # A large surplus causes one device to be deleted (capacity 6 -> 3).
    self._test_pool_maintenance(desired_slots_free=3, expected=3)
def test_vm_based_hosting_device_marginal_slot_surplus_no_change(self):
    # A small surplus is tolerated; no devices removed.
    self._test_pool_maintenance(desired_slots_free=5, expected=6)
def test_hw_based_hosting_device_no_change(self):
    # Hardware devices are not pool-managed; capacity stays at 6.
    self._test_pool_maintenance(host_category=HW_CATEGORY, expected=6)
| |
import logging
import re
import simplejson as json
import traceback
import sys
from functools import partial
from lxml.builder import ElementMaker
import lxml.html
from lxml.etree import Entity
from lxml.html import html_parser, HtmlElement
import jinja2
from paucore.utils.data import is_seq_not_string
logger = logging.getLogger(__name__)
if sys.maxunicode == 65535:
# Heroku seems to use a narrow python build
IMPROPER_HTML_ENCODING = re.compile(ur'[^\u0009\u000A\u000D\u0020-\uD7FF\uE000-\uFFFD]')
else:
IMPROPER_HTML_ENCODING = re.compile(ur'[^\u0009\u000A\u000D\u0020-\uD7FF\uE000-\uFFFD\U00010000-\U0010FFFF]')
def _flatten_classes(classes):
return set((' '.join(classes)).split(' '))
def _format_classes(class_set):
return ' '.join(class_set)
def render_etree(tree):
    """Render *tree* to jinja2 Markup.

    Accepts an lxml HtmlElement, a sequence of them, or plain markup text;
    None and jinja2 Undefined render as the empty string.
    """
    # HtmlElement is itself a sequence, so the explicit isinstance check is
    # needed to distinguish "one element" from "sequence of elements".
    if tree is not None and is_seq_not_string(tree) and not isinstance(tree, HtmlElement):
        return jinja2.Markup(''.join(map(render_etree_to_string, tree)))
    elif isinstance(tree, basestring):
        # BUG FIX: this branch was previously placed after the tostring()
        # branch and therefore unreachable -- a plain string would have been
        # passed to lxml.html.tostring() and crashed. Strings pass through.
        return tree
    elif tree is not None and not isinstance(tree, jinja2.runtime.Undefined):
        # etrees evaluate to false, so we check "is not None" rather than
        # truthiness; jinja2.runtime.Undefined could also get passed in here.
        return jinja2.Markup(lxml.html.tostring(tree))
    else:
        return ''
def html_from_string(s):
    """Parse *s* into an lxml HTML element.

    Returns the empty string for falsy or unparsable input.
    """
    if not s:
        return ''
    try:
        return lxml.html.fromstring(s)
    except lxml.etree.ParserError:
        return ''
def render_etree_to_string(tree):
    """Serialize a single lxml tree (or pass through a string) to text.

    A None argument is logged loudly with a stack trace and rendered as ''.
    """
    # 6/1/12 mthurman: if I don't see these log messages, then in render_etree
    # i'm going to filter(lambda t: isinstance(t, HtmlElement), tree) in the
    # 1st clause. We really should only pass in an lxml etree here.
    if tree is None:
        stack = ''.join(traceback.format_stack())
        logger.error('render_etree_to_string called with None object\n%s' % stack)
        return ''
    if isinstance(tree, basestring):
        return tree
    return lxml.html.tostring(tree)
def render_presenters(presenters, wrapper=None, sep=None):
    """Render presenter object(s) to Markup via HtmlGen().present().

    :param presenters: a presenter, a sequence of presenters, or a bare
        HtmlElement (rendered directly with render_etree)
    :param wrapper: optional tag callable each rendered presenter is wrapped in
    :param sep: optional separator element placed between presenters
    """
    # just checking if presenters doesn't work for HtmlElements, so check that separately--they don't need to go through present
    if isinstance(presenters, HtmlElement):
        return render_etree(presenters)
    elif presenters:
        trees = HtmlGen().present(presenters, wrapper, sep)
        return render_etree(trees)
    else:
        return ''
def extract_classes(attrs):
    """Pop 'class_' from an attribute dict, normalizing it to a sequence.

    Returns (classes, attrs): *classes* is always a sequence (a bare string
    is wrapped in a one-element tuple, absent key yields ()), and *attrs*
    no longer contains the 'class_' key.
    """
    raw = attrs.pop('class_', ())
    if is_seq_not_string(raw):
        return (raw, attrs)
    return ((raw,), attrs)
class MXMLGenericElementMixin(object):
    """Fluent class/data-attribute helpers mixed into lxml's HtmlElement
    (the monkey-patch happens just below this class)."""
    @property
    def classes(self):
        """Current CSS classes as a set; empty strings from missing or
        whitespace-padded 'class' attributes are filtered out."""
        c = self.attrib.get('class', '').split(' ')
        return set(filter(None, c))
    def add_class(self, *args):
        """Add class name(s); space-separated strings are split. Fluent."""
        new_classes = _flatten_classes(args)
        self.attrib['class'] = _format_classes(self.classes | new_classes)
        return self
    def remove_class(self, *args):
        """Remove class name(s); names not present are ignored. Fluent."""
        rm_classes = _flatten_classes(args)
        self.attrib['class'] = _format_classes(self.classes - rm_classes)
        return self
    def data(self, key, value):
        """Set the HTML5 data-<key> attribute to *value*. Fluent."""
        self.attrib['data-%s' % key] = value
        return self
# It's just like Ruby! OMG. :(
# NOTE(review): this monkey-patches EVERY lxml HtmlElement in the process
# with the mixin above -- it affects all lxml.html users, not just this module.
HtmlElement.__bases__ += (MXMLGenericElementMixin,)
# Element factory bound to lxml's HTML parser so generated nodes are HtmlElements.
maker = ElementMaker(makeelement=html_parser.makeelement)
# Cache of canonical tag name -> ElementMaker factory, shared by all HtmlGen instances.
_cls_cache = {}
# TODO:
# Mark's thoughts about some of this:
# - making links/buttons is going to be such a common case we'll probably want a helper method in here for that
class HtmlGen(object):
    """Programmatic HTML builder on top of lxml's ElementMaker.

    Attribute access resolves to a tag factory, so ``html.div(...)`` is
    equivalent to ``html.el('div', ...)``.
    """
    def __init__(self, maker=maker):
        super(HtmlGen, self).__init__()
        self.maker = maker
    def __getattr__(self, attr):
        # html.span(...) -> self.el('span', ...)
        return partial(self.el, attr)
    def el(self, tag, *args, **kwargs):
        """Build an element.

        Handles the class_/style/data keyword conventions, the trailing
        underscore hacks for reserved attribute names, flattens list
        children, and scrubs characters that are illegal in HTML from
        string children.
        """
        canon_tag = tag.strip().lower()
        if canon_tag not in _cls_cache:
            _cls_cache[canon_tag] = getattr(self.maker, canon_tag)
        tag_cls = _cls_cache[canon_tag]
        (classes, kwargs) = extract_classes(kwargs)
        if classes:
            args = args + (self._classes(*classes),)
        style = kwargs.pop('style', None)
        if style:
            # Accept a preformatted style string or a dict of CSS rules.
            if isinstance(style, basestring):
                args = args + ({'style': style},)
            else:
                args = args + (self._style(**style),)
        data = kwargs.pop('data', None)
        if data:
            # Expand data={...} into individual data-* attributes; None
            # becomes the empty string and non-strings are JSON-encoded.
            data_attributes = []
            for key, val in data.iteritems():
                encoded_val = val
                if val is None:
                    encoded_val = ''
                elif not isinstance(val, basestring):
                    encoded_val = json.dumps(val)
                data_attributes.append({
                    'data-%s' % (key): encoded_val
                })
            args = args + tuple(data_attributes)
        # These attribute names are Python keywords/builtins, so callers
        # pass them with a trailing underscore.
        underscore_hacks = (
            ('for', 'for_'),  # labels, the for attribute in html will connect a label to an input
            ('type', 'type_'),  # inputs
            ('id', 'id_'),  # inputs
        )
        for correct, hack in underscore_hacks:
            value = kwargs.pop(hack, None)
            if value:
                args = args + ({correct: value}, )
        # if args contains any lists (not dicts), they have to be splatted.
        # This allows presenter methods to return lists to html gen tags
        # without having to wrap them in another element.
        new_args = []
        for arg in args:
            if isinstance(arg, list):
                arg_items = []
                for item in arg:
                    if isinstance(item, basestring):
                        arg_items.append(IMPROPER_HTML_ENCODING.sub('', item))
                    else:
                        arg_items.append(item)
                new_args.extend(arg_items)
            elif isinstance(arg, basestring):
                proper_arg = IMPROPER_HTML_ENCODING.sub('', arg)
                new_args.append(proper_arg)
            else:
                new_args.append(arg)
        return tag_cls(*new_args, **kwargs)
    def data(self, key, value):
        """Build a standalone data-* attribute dict."""
        return {'data-%s' % key: value}
    def entity(self, code):
        """Build an HTML entity node, e.g. entity('nbsp')."""
        return Entity(code)
    # yui grid helpers:
    def grid(self, *args, **kwargs):
        """A YUI3 grid container: <div class="yui3-g ...">."""
        (classes, kwargs) = extract_classes(kwargs)
        classes = classes + ("yui3-g",)
        return self.el('div', *args, class_=classes, **kwargs)
    # yui grid helpers:
    # html.unit(width="1-2", class_='myclass1', *[CONTENT])
    # html.unit(width="1-2", class_=('myclass1', 'myclass2',), *[CONTENT])
    # html.unit(width="1-2", CONTENT)
    # html.unit(CONTENT)
    def unit(self, *args, **kwargs):
        """A YUI3 grid unit: <div class="yui3-u[-W] [m-yui3-u[-MW]] ...">."""
        (classes, kwargs) = extract_classes(kwargs)
        width = kwargs.pop('width', "")
        mwidth = kwargs.pop('mwidth', None)
        # BUG FIX: was `width is not ""` / `mwidth is not ""` -- identity
        # comparison of strings only works by accident of CPython interning;
        # use value comparison.
        if width != "":
            width = "-%s" % width
        width_class = "yui3-u%s" % width
        mwidth_class = ""
        if mwidth:
            if mwidth != "":
                mwidth = "-%s" % mwidth
            mwidth_class = "m-yui3-u%s" % mwidth
        # BUG FIX: only include the mobile-width class when one was produced,
        # so the class attribute doesn't pick up an empty entry.
        if mwidth_class:
            classes = classes + (width_class, mwidth_class)
        else:
            classes = classes + (width_class,)
        return self.el('div', *args, class_=classes, **kwargs)
    # used in cases where you want either an <a> tag or a <span> tag
    # eg. in a list of people, some names should be linked (ie. friends) and some not (ie. strangers)
    # example: html.a_or_span(href=my_href, make_link=should_i_link_or_not, *[CONTENT])
    def a_or_span(self, *args, **kwargs):
        """Render an <a> when make_link is truthy, otherwise a <span> with
        the href discarded."""
        link = kwargs.pop('make_link')
        if link:
            tag = 'a'
        else:
            tag = 'span'
            # BUG FIX: tolerate a missing href instead of raising KeyError.
            kwargs.pop('href', None)
        return self.el(tag, *args, **kwargs)
    # wrapper will be a tag that each presenter is wrapped in (li, div, etc)
    # sep will be a tag that separates each presenter (hr, br, etc)
    # Note: this hasn't really been tested yet so if html.present doesn't work, this could very well be broken since it returns a list
    def present(self, presenters, wrapper=None, sep=None):
        """Render presenters (objects with render_html(), or raw values)
        into a flat list of trees, optionally wrapped and/or separated."""
        if not is_seq_not_string(presenters):
            presenters = [presenters]
        # is there a better way to do this with itertools? maybe but it seems like not for the sep part
        output = []
        for p in presenters:
            if hasattr(p, 'render_html'):
                rendered = p.render_html()
            else:
                rendered = p
            if wrapper:
                rendered = wrapper(rendered)
            if is_seq_not_string(rendered) and not isinstance(rendered, HtmlElement):
                output.extend(rendered)
            else:
                output.append(rendered)
            if sep:
                output.append(sep)
        if sep:
            # Drop the trailing separator appended after the final presenter.
            output = output[:-1]
        return output
    # Do not call these methods, the _ means they're private. Use class_=... and style=....
    def _classes(self, *classes):
        return {'class': _format_classes(_flatten_classes(classes))}
    # Turn a dictionary of style rules to the style attribute--no logic right now
    def _style(self, **styles):
        return {'style': "".join(["%s:%s;" % (k, v) for k, v in styles.iteritems()])}
| |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Adding New Instrument
=====================
Any new instrument should be a subclass of Instrument and it must have a name.
When a new instrument is added to Workload Automation, the methods of the new
instrument will be found automatically and hooked up to the supported signals.
Once a signal is broadcasted, the corresponding registered method is invoked.
Each method in Instrument must take two arguments, which are self and context.
Supported signals can be found in [... link to signals ...] To make
implementations easier and common, the basic steps to add new instrument is
similar to the steps to add new workload.
Hence, the following methods are sufficient to implement to add new instrument:
- setup: This method is invoked after the workload is setup. All the
necessary setups should go inside this method. Setup, includes operations
like, pushing the files to the target device, install them, clear logs,
etc.
- start: It is invoked just before the workload start execution. Here is
where instrument measures start being registered/taken.
- stop: It is invoked just after the workload execution stops. The measures
should stop being taken/registered.
- update_result: It is invoked after the workload updated its result.
update_result is where the taken measures are added to the result so it
can be processed by Workload Automation.
- teardown is invoked after the workload is torn down. It is a good place
to clean any logs generated by the instrument.
For example, to add an instrument which will trace device errors, we subclass
Instrument and overwrite the variable name.::
#BINARY_FILE = os.path.join(os.path.dirname(__file__), 'trace')
class TraceErrorsInstrument(Instrument):
name = 'trace-errors'
def __init__(self, device):
super(TraceErrorsInstrument, self).__init__(device)
self.trace_on_device = os.path.join(self.device.working_directory, 'trace')
We then declare and implement the aforementioned methods. For the setup method,
we want to push the file to the target device and then change the file mode to
755 ::
def setup(self, context):
self.device.push_file(BINARY_FILE, self.device.working_directory)
self.device.execute('chmod 755 {}'.format(self.trace_on_device))
Then we implemented the start method, which will simply run the file to start
tracing. ::
def start(self, context):
self.device.execute('{} start'.format(self.trace_on_device))
Lastly, we need to stop tracing once the workload stops and this happens in the
stop method::
def stop(self, context):
self.device.execute('{} stop'.format(self.trace_on_device))
The generated result can be updated inside update_result, or if it is trace, we
just pull the file to the host device. context has a result variable which
has add_metric method. It can be used to add the instrumentation results metrics
to the final result for the workload. The method can be passed 4 params, which
are metric key, value, unit and lower_is_better, which is a boolean. ::
def update_result(self, context):
# pull the trace file to the device
result = os.path.join(self.device.working_directory, 'trace.txt')
self.device.pull_file(result, context.working_directory)
# parse the file if needs to be parsed, or add result to
# context.result
At the end, we might want to delete any files generated by the instrumentation
and the code to clear these file goes in teardown method. ::
def teardown(self, context):
self.device.delete_file(os.path.join(self.device.working_directory, 'trace.txt'))
"""
import logging
import inspect
from collections import OrderedDict
import wlauto.core.signal as signal
from wlauto.core.extension import Extension
from wlauto.exceptions import WAError, DeviceNotRespondingError, TimeoutError
from wlauto.utils.misc import get_traceback, isiterable
from wlauto.utils.types import identifier
logger = logging.getLogger('instrumentation')
# Maps method names onto signals the should be registered to.
# Note: the begin/end signals are paired -- if a begin_ signal is sent,
# then the corresponding end_ signal is guaranteed to also be sent.
# Note: using OrderedDict to preserve logical ordering for the table generated
# in the documentation
SIGNAL_MAP = OrderedDict([
# Below are "aliases" for some of the more common signals to allow
# instrumentation to have similar structure to workloads
('initialize', signal.RUN_INIT),
('setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('start', signal.BEFORE_WORKLOAD_EXECUTION),
('stop', signal.AFTER_WORKLOAD_EXECUTION),
('process_workload_result', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('update_result', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('finalize', signal.RUN_FIN),
('on_run_start', signal.RUN_START),
('on_run_end', signal.RUN_END),
('on_workload_spec_start', signal.WORKLOAD_SPEC_START),
('on_workload_spec_end', signal.WORKLOAD_SPEC_END),
('on_iteration_start', signal.ITERATION_START),
('on_iteration_end', signal.ITERATION_END),
('before_initial_boot', signal.BEFORE_INITIAL_BOOT),
('on_successful_initial_boot', signal.SUCCESSFUL_INITIAL_BOOT),
('after_initial_boot', signal.AFTER_INITIAL_BOOT),
('before_first_iteration_boot', signal.BEFORE_FIRST_ITERATION_BOOT),
('on_successful_first_iteration_boot', signal.SUCCESSFUL_FIRST_ITERATION_BOOT),
('after_first_iteration_boot', signal.AFTER_FIRST_ITERATION_BOOT),
('before_boot', signal.BEFORE_BOOT),
('on_successful_boot', signal.SUCCESSFUL_BOOT),
('after_boot', signal.AFTER_BOOT),
('on_spec_init', signal.SPEC_INIT),
('on_run_init', signal.RUN_INIT),
('on_iteration_init', signal.ITERATION_INIT),
('before_workload_setup', signal.BEFORE_WORKLOAD_SETUP),
('on_successful_workload_setup', signal.SUCCESSFUL_WORKLOAD_SETUP),
('after_workload_setup', signal.AFTER_WORKLOAD_SETUP),
('before_workload_execution', signal.BEFORE_WORKLOAD_EXECUTION),
('on_successful_workload_execution', signal.SUCCESSFUL_WORKLOAD_EXECUTION),
('after_workload_execution', signal.AFTER_WORKLOAD_EXECUTION),
('before_workload_result_update', signal.BEFORE_WORKLOAD_RESULT_UPDATE),
('on_successful_workload_result_update', signal.SUCCESSFUL_WORKLOAD_RESULT_UPDATE),
('after_workload_result_update', signal.AFTER_WORKLOAD_RESULT_UPDATE),
('before_workload_teardown', signal.BEFORE_WORKLOAD_TEARDOWN),
('on_successful_workload_teardown', signal.SUCCESSFUL_WORKLOAD_TEARDOWN),
('after_workload_teardown', signal.AFTER_WORKLOAD_TEARDOWN),
('before_overall_results_processing', signal.BEFORE_OVERALL_RESULTS_PROCESSING),
('on_successful_overall_results_processing', signal.SUCCESSFUL_OVERALL_RESULTS_PROCESSING),
('after_overall_results_processing', signal.AFTER_OVERALL_RESULTS_PROCESSING),
('on_error', signal.ERROR_LOGGED),
('on_warning', signal.WARNING_LOGGED),
])
PRIORITY_MAP = OrderedDict([
('very_fast_', 20),
('fast_', 10),
('normal_', 0),
('slow_', -10),
('very_slow_', -20),
])
installed = []
def is_installed(instrument):
    """Check whether *instrument* (instance, class, or name) is installed."""
    if isinstance(instrument, Instrument):
        # Same object, or a different instance with a matching name.
        return (instrument in installed or
                instrument.name in [i.name for i in installed])
    if isinstance(instrument, type):
        return instrument in [i.__class__ for i in installed]
    # Otherwise assume a (possibly non-canonical) instrument name.
    return identifier(instrument) in [identifier(i.name) for i in installed]
def is_enabled(instrument):
    """Return True if the given instrument is installed and enabled."""
    if isinstance(instrument, (Instrument, type)):
        name = instrument.name
    else:
        name = instrument  # assume a name string
    try:
        return get_instrument(name).is_enabled
    except ValueError:
        # Not installed at all.
        return False
# Module-level flag set by ManagedCallback when an instrument callback fails.
failures_detected = False
def reset_failures():
    """Clear the module-level failure flag."""
    global failures_detected  # pylint: disable=W0603
    failures_detected = False
def check_failures():
    """Report whether any failure occurred since the last reset, then reset."""
    had_failures = failures_detected
    reset_failures()
    return had_failures
class ManagedCallback(object):
    """
    This wraps instruments' callbacks to ensure that errors do not interfere
    with run execution.
    """
    def __init__(self, instrument, callback):
        self.instrument = instrument
        self.callback = callback
    def __call__(self, context):
        """Invoke the wrapped callback, trapping and logging most errors.

        KeyboardInterrupt and device/timeout errors always propagate. Any
        other exception is logged, sets the module-level failure flag, and
        -- if it occurred outside an iteration -- marks the instrument as
        broken and disables it for the rest of the run.
        """
        if self.instrument.is_enabled:
            try:
                self.callback(context)
            except (KeyboardInterrupt, DeviceNotRespondingError, TimeoutError):  # pylint: disable=W0703
                raise
            except Exception as e:  # pylint: disable=W0703
                # BUG FIX: corrected typo in logged message ("insturment").
                logger.error('Error in instrument {}'.format(self.instrument.name))
                global failures_detected  # pylint: disable=W0603
                failures_detected = True
                if isinstance(e, WAError):
                    logger.error(e)
                else:
                    tb = get_traceback()
                    logger.error(tb)
                    logger.error('{}({})'.format(e.__class__.__name__, e))
                if not context.current_iteration:
                    # Error occurred outside of an iteration (most likely
                    # during initial setup or teardown). Since this would affect
                    # the rest of the run, mark the instrument as broken so that
                    # it doesn't get re-enabled for subsequent iterations.
                    self.instrument.is_broken = True
                    disable(self.instrument)
# Need this to keep track of callbacks, because the dispatcher only keeps
# weak references, so if the callbacks aren't referenced elsewhere, they will
# be deallocated before they've had a chance to be invoked.
_callbacks = []
def install(instrument):
    """
    This will look for methods (or any callable members) with specific names
    in the instrument and hook them up to the corresponding signals.

    :param instrument: Instrument instance to install.
    :raises ValueError: if the instrument is already installed, if a matched
        attribute is not callable, or if a callback has the wrong arity.
    """
    logger.debug('Installing instrument %s.', instrument)
    if is_installed(instrument):
        raise ValueError('Instrument {} is already installed.'.format(instrument.name))
    for attr_name in dir(instrument):
        # An optional speed prefix (e.g. "fast_setup") maps to a dispatch
        # priority and is stripped before the SIGNAL_MAP lookup.
        priority = 0
        stripped_attr_name = attr_name
        for key, value in PRIORITY_MAP.iteritems():
            if attr_name.startswith(key):
                stripped_attr_name = attr_name[len(key):]
                priority = value
                break
        if stripped_attr_name in SIGNAL_MAP:
            attr = getattr(instrument, attr_name)
            if not callable(attr):
                raise ValueError('Attribute {} not callable in {}.'.format(attr_name, instrument))
            argspec = inspect.getargspec(attr)
            arg_num = len(argspec.args)
            # Instrument callbacks will be passed exactly two arguments: self
            # (the instrument instance to which the callback is bound) and
            # context. However, we also allow callbacks to capture the context
            # in variable arguments (declared as "*args" in the definition).
            if arg_num > 2 or (arg_num < 2 and argspec.varargs is None):
                message = '{} must take exactly 2 positional arguments; {} given.'
                raise ValueError(message.format(attr_name, arg_num))
            logger.debug('\tConnecting %s to %s', attr.__name__, SIGNAL_MAP[stripped_attr_name])
            # Wrap in ManagedCallback so instrument errors don't abort the
            # run, and keep a strong reference (see _callbacks above).
            mc = ManagedCallback(instrument, attr)
            _callbacks.append(mc)
            signal.connect(mc, SIGNAL_MAP[stripped_attr_name], priority=priority)
    installed.append(instrument)
def uninstall(instrument):
    """Remove an installed instrument (given as instance, class, or name)."""
    installed.remove(get_instrument(instrument))
def validate():
    """Run validation on every installed instrument."""
    for inst in installed:
        inst.validate()
def get_instrument(inst):
    """Resolve *inst* (an Instrument instance or a name) to the installed
    instance; raises ValueError if it is not installed."""
    if isinstance(inst, Instrument):
        return inst
    wanted = identifier(inst)
    for candidate in installed:
        if identifier(candidate.name) == wanted:
            return candidate
    raise ValueError('Instrument {} is not installed'.format(inst))
def disable_all():
    """Disable every installed instrument."""
    for inst in installed:
        _disable_instrument(inst)
def enable_all():
    """Enable every installed instrument that is not broken."""
    for inst in installed:
        _enable_instrument(inst)
def enable(to_enable):
    """Enable a single instrument or an iterable of instruments."""
    if isiterable(to_enable):
        for item in to_enable:
            _enable_instrument(item)
    else:
        _enable_instrument(to_enable)
def disable(to_disable):
    """Disable a single instrument or an iterable of instruments."""
    if isiterable(to_disable):
        for item in to_disable:
            _disable_instrument(item)
    else:
        _disable_instrument(to_disable)
def _enable_instrument(inst):
    """Enable one instrument unless it has been marked broken."""
    inst = get_instrument(inst)
    if inst.is_broken:
        logger.debug('Not enabling broken instrument {}'.format(inst.name))
    else:
        logger.debug('Enabling instrument {}'.format(inst.name))
        inst.is_enabled = True
def _disable_instrument(inst):
    """Disable one instrument if it is currently enabled."""
    inst = get_instrument(inst)
    if inst.is_enabled:
        logger.debug('Disabling instrument {}'.format(inst.name))
        inst.is_enabled = False
def get_enabled():
    """Return the installed instruments that are currently enabled."""
    return [inst for inst in installed if inst.is_enabled]
def get_disabled():
    """Return the installed instruments that are currently disabled."""
    return [inst for inst in installed if not inst.is_enabled]
class Instrument(Extension):
    """
    Base class for instrumentation implementations.

    Subclasses define methods named after the keys of SIGNAL_MAP (setup,
    start, stop, update_result, teardown, ...); install() discovers them and
    connects them to the corresponding signals.
    """
    def __init__(self, device, **kwargs):
        super(Instrument, self).__init__(**kwargs)
        self.device = device
        # Runtime state toggled by this module's enable/disable helpers.
        self.is_enabled = True
        # Set by ManagedCallback when a callback fails outside an iteration;
        # broken instruments are never re-enabled.
        self.is_broken = False
    def initialize(self, context):
        # Mapped to signal.RUN_INIT via SIGNAL_MAP; no-op by default.
        pass
    def finalize(self, context):
        # Mapped to signal.RUN_FIN via SIGNAL_MAP; no-op by default.
        pass
    def __str__(self):
        return self.name
    def __repr__(self):
        return 'Instrument({})'.format(self.name)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import select
import time
import urllib.request
import aiohttp
import psycopg2
import psycopg2.extensions
import fumbbl_trans.exc
import fumbbl_trans.html_match
from fumbbl_trans.session import get_relax_iterator
import fumbbl_trans.xml_matches
from fplusdb_main import common_regex
from fplusdb_main import sql_commands
from fplusdb_main.schema import MAIN
import fplusdb_main.transimp.html_match
import fplusdb_main.transimp.xml_matches
import fumnotifwscli
import fumlocxmlmatches
class BasePosgreSQLScript:
    """Base for scripts that hold a single psycopg2 connection.

    Class attributes provide defaults; connect() keyword arguments take
    precedence over them. The instance is a context manager that connects
    on entry and closes on exit.
    """
    async_ = False
    autocommit = None  # None means "use psycopg2's default"
    isol_lvl = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
    def __init__(self, dsn):
        self.dsn = dsn
        self.conn = None
    def connect(self, isol_lvl=None, async_=None,
            autocommit=None):
        """Open the connection; parameters default to the class attributes.

        BUG FIX: the previous ``x = x or self.x`` defaulting made it
        impossible to pass an explicit falsy override (e.g. autocommit=False
        while the class default is True); use ``is None`` checks instead.
        """
        assert not self.conn, 'already connected'
        if isol_lvl is None:
            isol_lvl = self.isol_lvl
        if async_ is None:
            async_ = self.async_
        if autocommit is None:
            autocommit = self.autocommit
        # BUG FIX: "async" is a reserved keyword from Python 3.7 on, so the
        # old async=... keyword is a syntax error there; psycopg2 (>= 2.7)
        # accepts async_ as an alias.
        self.conn = psycopg2.connect(self.dsn,
            connection_factory=None,
            cursor_factory=None,
            async_=async_)
        if not async_:
            self.conn.set_session(
                isolation_level=isol_lvl,
                readonly=None,
                deferrable=None,
                autocommit=autocommit)
    def __enter__(self):
        self.connect()
        return self
    def __exit__(self, type_, value, traceback):
        self.conn.close()
        self.conn = None
class Script(BasePosgreSQLScript):
    """Ensure local replay data exists for a range of FUMBBL matches.

    Walks the requested match IDs (or listens for match_insert
    notifications in 'INF' mode) and asks the replay HTTP service to
    ensure each replay, recording the outcome in the database.
    """
    async_ = False
    autocommit = True
    isol_lvl = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
    logger = logging.getLogger(__name__)
    def __init__(self, dsn, ids, *,
            hhost='localhost', hport=8080,
            ncli=False, nhost='localhost', nport=22224,
            nores_stop=False, relax=60, upsert=False,
            **kwargs):
        super().__init__(dsn)
        self.ids = ids
        self.hhost = hhost
        self.hport = hport
        self.ncli = ncli
        self.nhost = nhost
        self.nport = nport
        self.nores_stop = nores_stop
        self.relax = relax
        self.upsert = upsert
    def get_idgen(self):
        """Return a generator of match IDs without local replay data.

        'L'/'H' placeholders ('INF' is treated as 'H') are first resolved
        to the lowest/highest qualifying match ID via SQL.
        """
        assert self.conn, 'not connected'
        conn = self.conn
        ids = [('H' if i == 'INF' else i) for i in list(self.ids)]
        params = {
            'L': sql_commands.select_lowest_match_id_without_local_replay_data,
            'H': sql_commands.select_highest_match_id_without_local_replay_data
        }
        for i, id_ in enumerate(ids):
            # BUG FIX: the old default of (None, None) was a truthy tuple,
            # so a plain integer ID fell into the branch and f(cur) raised
            # TypeError; default to None so integers pass through unchanged.
            f = params.get(id_)
            if f:
                with conn:
                    with conn.cursor() as cur:
                        c = f(cur)
                        cur.execute(c)
                        result = cur.fetchone()
                        if result:
                            ids[i] = result[0]
        if len(ids) == 1:
            # A single ID means the range [id, id].
            ids.append(ids[0])
        cur = conn.cursor()
        c = sql_commands.select_match_ids_without_local_replay_data(
            cur, *ids)
        cur.execute(c)
        return (r[0] for r in cur)
    def start(self):
        """Run the script: follow match_insert notifications forever in
        'INF' mode, otherwise walk the requested ID range once."""
        with self:
            conn = self.conn
            if 'INF' in list(self.ids):
                with conn:
                    with conn.cursor() as cur:
                        cur.execute("LISTEN match_insert;")
                # http://initd.org/psycopg/docs/advanced.html#asynch
                # ronous-notifications
                while True:
                    if select.select([conn], [], [], 5) == ([], [], []):
                        pass  # poll timeout; loop again
                    else:
                        conn.poll()
                        while conn.notifies:
                            notify = conn.notifies.pop(0)
                            mid = int(notify.payload)
                            self._ensure_replay_of_match(mid)
            else:
                idgen = self.get_idgen()
                for mid in idgen:
                    self._ensure_replay_of_match(mid)
    def _ensure_replay_of_match(self, match_id):
        """Ask the replay service to ensure the replay of *match_id*,
        retrying with back-off and recording the outcome."""
        fs = 'getting replay #{} (Match #{})...'
        conn = self.conn
        with conn.cursor() as cur:
            c = sql_commands.select_replay_id_of_match(cur, match_id)
            cur.execute(c)
            replay_id = cur.fetchone()[0]
            self.logger.info(fs.format(replay_id, match_id))
            rit = get_relax_iterator()
            attempt = 1
            # BUG FIX: honor the configured --hhost/--hport options instead
            # of the previously hard-coded http://127.0.0.1:8080 endpoint.
            fs = 'http://{}:{}/ensurereplay?id={{}}'.format(
                self.hhost, self.hport)
            for i, relax in enumerate(rit):
                try:
                    with urllib.request.urlopen(
                            fs.format(replay_id)) as response:
                        html = response.read()
                except urllib.error.HTTPError as err:
                    if 4 <= attempt:
                        # Give up after the fourth failed attempt.
                        c = sql_commands.update_local_replay_data(cur,
                            match_id, replay_id, 'not available')
                        cur.execute(c)
                        break
                    errfs = '{}; attempt: {}/3; relax: {}'
                    self.logger.warning(errfs.format(err, attempt, relax))
                    time.sleep(relax)
                    attempt += 1
                else:
                    self.logger.info('{:0>8}: ensured'.format(replay_id))
                    c = sql_commands.update_local_replay_data(cur,
                        match_id, replay_id, 'ok')
                    cur.execute(c)
                    break
def get_main_params():
    """Parse command-line arguments and return them as a plain dict
    suitable for Script(**params)."""
    parser = argparse.ArgumentParser(parents=(
        fumlocxmlmatches.log_parser,
    ))
    parser.add_argument('dsn',
        help=('PostgreSQL connection string of the '
              'FUMBBLPlus database'))
    # NOTE(review): this patches _format_args on the formatter CLASS, so the
    # tweak below is process-global, not limited to this parser.
    _orig_format_args = parser.formatter_class._format_args
    def _format_args(inst, action, default_metavar):
        # Show ID ranges as "[ID]" instead of "[ID ...]" in help output.
        result = _orig_format_args(inst, action, default_metavar)
        if isinstance(action, fumlocxmlmatches.IDRangeAction):
            result = result.replace(' ...]', ']')
        return result
    parser.formatter_class._format_args = _format_args
    parser.add_argument('ids', metavar='ID', nargs='+',
        action=fumlocxmlmatches.IDRangeAction,
        help=('One or two arguments indicating a specific match '
              'or a range of matches. They accept integer values or '
              'either "L" or "H". The second argument also accepts '
              '"INF" as value representing positive infinity. '
              'Here, "L" represents the match ID below the lowest '
              'match ID in the collection. Similarly, "H" represents '
              'the match ID above the highest actual match ID in the '
              'collection.'))
    parser.add_argument('--hhost', default='localhost',
        help='http host (default: localhost)')
    parser.add_argument('--hport', type=int, default=8080,
        help='http port (default: 8080)')
    parser.add_argument('-n', '--ncli',
        action='store_true', default=False,
        help='enable notifier client for infinity mode')
    parser.add_argument('--nores_stop',
        action='store_true',
        default=False,
        help=('enable stopping if there is no result'))
    parser.add_argument('--nhost', default='localhost',
        help='notifier host (default: localhost)')
    parser.add_argument('--nport', type=int, default=22224,
        help='notifier port (default: 22224)')
    parser.add_argument('-r', '--relax', default=60,
        help=('sets the relax time before checking for new match '
              'ID in regular infinity mode (default: 60)'))
    parser.add_argument('-u', '--upsert', action='store_true',
        default=False,
        help='enable updating of existing rows')
    args = parser.parse_args()
    # _get_kwargs() yields (name, value) pairs for every parsed option.
    return dict(args._get_kwargs())
def main():
    """Configure logging from the CLI arguments and run the script."""
    params = get_main_params()
    handler = logging.StreamHandler(params['logto'])
    handler.setFormatter(fumlocxmlmatches.LOG_FORMATTER)
    Script.logger.setLevel(params['loglevel'])
    Script.logger.addHandler(handler)
    Script(**params).start()
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.utils import next
from jinja2.lexer import describe_token, describe_token_expr
#: statements that call into a dedicated parse_<name> method on the parser
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
             state=None):
    """Tokenize *source* and prepare parser state.

    :param environment: jinja2 environment supplying the lexer and any
        enabled extensions.
    :param source: template source text.
    :param name: logical template name used in error messages.
    :param filename: filename used in error messages.
    :param state: optional initial lexer state.
    """
    self.environment = environment
    self.stream = environment._tokenize(source, name, filename, state)
    self.name = name
    self.filename = filename
    self.closed = False
    # Map of custom tag name -> extension parse function.
    self.extensions = {}
    for extension in environment.iter_extensions():
        for tag in extension.tags:
            self.extensions[tag] = extension.parse
    # Counter backing free_identifier().
    self._last_identifier = 0
    # Stacks used to produce helpful nesting error messages (_fail_ut_eof).
    self._tag_stack = []
    self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
    """Convenience method that raises `exc` with the message, passed
    line number or last line number as well as the current name and
    filename.

    :param msg: error message text.
    :param lineno: explicit line number; defaults to the current token's.
    :param exc: exception class to raise (default TemplateSyntaxError).
    """
    if lineno is None:
        lineno = self.stream.current.lineno
    raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
    """Shared failure path for unknown-tag and end-of-template errors.

    Builds a message listing the end tokens the parser is expecting (from
    *end_token_stack*) and, when a nesting mistake looks likely, names the
    innermost open block; always raises via self.fail().
    """
    expected = []
    for exprs in end_token_stack:
        expected.extend(map(describe_token_expr, exprs))
    if end_token_stack:
        currently_looking = ' or '.join(
            "'%s'" % describe_token_expr(expr)
            for expr in end_token_stack[-1])
    else:
        currently_looking = None
    if name is None:
        message = ['Unexpected end of template.']
    else:
        message = ['Encountered unknown tag \'%s\'.' % name]
    if currently_looking:
        if name is not None and name in expected:
            # The tag exists but closes a different (outer) block.
            message.append('You probably made a nesting mistake. Jinja '
                           'is expecting this tag, but currently looking '
                           'for %s.' % currently_looking)
        else:
            message.append('Jinja was looking for the following tags: '
                           '%s.' % currently_looking)
    if self._tag_stack:
        message.append('The innermost block that needs to be '
                       'closed is \'%s\'.' % self._tag_stack[-1])
    self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
    """Called if the parser encounters an unknown tag. Tries to fail
    with a human readable error message that could help to identify
    the problem.
    """
    return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
    """Like fail_unknown_tag but for end of template situations."""
    # Include the caller's expected end tokens in the reported stack.
    stack = list(self._end_token_stack)
    if end_tokens is not None:
        stack.append(end_tokens)
    return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
    """Return True if the current token terminates a tuple expression."""
    current = self.stream.current
    if current.type in ('variable_end', 'block_end', 'rparen'):
        return True
    if extra_end_rules is not None:
        return current.test_any(extra_end_rules)
    return False
def free_identifier(self, lineno=None):
    """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
    self._last_identifier += 1
    # Bypass InternalName's constructor on purpose: these nodes may only be
    # created by the parser itself, so initialize via the Node base class.
    rv = object.__new__(nodes.InternalName)
    nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
    return rv
    def parse_statement(self):
        """Parse a single statement.

        Dispatches on the current tag name to the matching ``parse_*``
        method, a block helper (``call``/``filter``) or a registered
        extension; fails with a helpful message for unknown tags.
        """
        token = self.stream.current
        if token.type != 'name':
            self.fail('tag name expected', token.lineno)
        # track the open tag so error messages can name the unclosed block
        self._tag_stack.append(token.value)
        pop_tag = True
        try:
            if token.value in _statement_keywords:
                return getattr(self, 'parse_' + self.stream.current.value)()
            if token.value == 'call':
                return self.parse_call_block()
            if token.value == 'filter':
                return self.parse_filter_block()
            # extensions may register custom tags keyed by tag name
            ext = self.extensions.get(token.value)
            if ext is not None:
                return ext(self)
            # did not work out, remove the token we pushed by accident
            # from the stack so that the unknown tag fail function can
            # produce a proper error message.
            self._tag_stack.pop()
            pop_tag = False
            self.fail_unknown_tag(token.value, token.lineno)
        finally:
            # balance the push above on every successful parse path
            if pop_tag:
                self._tag_stack.pop()
    def parse_statements(self, end_tokens, drop_needle=False):
        """Parse multiple statements into a list until one of the end tokens
        is reached. This is used to parse the body of statements as it also
        parses template data if appropriate. The parser checks first if the
        current token is a colon and skips it if there is one. Then it checks
        for the block end and parses until if one of the `end_tokens` is
        reached. Per default the active token in the stream at the end of
        the call is the matched end token. If this is not wanted `drop_needle`
        can be set to `True` and the end token is removed.
        """
        # the first token may be a colon for python compatibility
        self.stream.skip_if('colon')
        # in the future it would be possible to add whole code sections
        # by adding some sort of end of statement token and parsing those here.
        self.stream.expect('block_end')
        result = self.subparse(end_tokens)
        # we reached the end of the template too early, the subparser
        # does not check for this, so we do that now
        if self.stream.current.type == 'eof':
            self.fail_eof(end_tokens)
        # consume the matched end token if the caller does not need it
        if drop_needle:
            next(self.stream)
        return result
    def parse_set(self):
        """Parse an assign statement."""
        # next() consumes the 'set' name token and yields its line number
        lineno = next(self.stream).lineno
        target = self.parse_assign_target()
        self.stream.expect('assign')
        expr = self.parse_tuple()
        return nodes.Assign(target, expr, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
    def parse_if(self):
        """Parse an if construct.

        ``elif`` branches are represented as nested :class:`nodes.If`
        nodes stored in the parent's ``else_`` list; ``result`` keeps the
        outermost node while ``node`` tracks the branch being filled.
        """
        node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
        while 1:
            node.test = self.parse_tuple(with_condexpr=False)
            node.body = self.parse_statements(('name:elif', 'name:else',
                                               'name:endif'))
            token = next(self.stream)
            if token.test('name:elif'):
                # chain another If node and keep looping for its test/body
                new_node = nodes.If(lineno=self.stream.current.lineno)
                node.else_ = [new_node]
                node = new_node
                continue
            elif token.test('name:else'):
                node.else_ = self.parse_statements(('name:endif',),
                                                   drop_needle=True)
            else:
                node.else_ = []
            break
        return result
    def parse_block(self):
        """Parse a ``{% block %}`` tag into a :class:`nodes.Block` node."""
        node = nodes.Block(lineno=next(self.stream).lineno)
        node.name = self.stream.expect('name').value
        node.scoped = self.stream.skip_if('name:scoped')
        # common problem people encounter when switching from django
        # to jinja. we do not support hyphens in block names, so let's
        # raise a nicer error message in that case.
        if self.stream.current.type == 'sub':
            self.fail('Block names in Jinja have to be valid Python '
                      'identifiers and may not contain hyphens, use an '
                      'underscore instead.')
        node.body = self.parse_statements(('name:endblock',), drop_needle=True)
        # allow the optional name repetition: {% endblock blockname %}
        self.stream.skip_if('name:' + node.name)
        return node
    def parse_extends(self):
        """Parse an ``{% extends %}`` tag; the template is any expression."""
        node = nodes.Extends(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        return node
    def parse_import_context(self, node, default):
        """Parse an optional ``with context`` / ``without context`` suffix
        and set ``node.with_context`` accordingly; *default* applies when
        no suffix is present.
        """
        if self.stream.current.test_any('name:with', 'name:without') and \
           self.stream.look().test('name:context'):
            # consume 'with'/'without', then skip the 'context' token
            node.with_context = next(self.stream).value == 'with'
            self.stream.skip()
        else:
            node.with_context = default
        return node
    def parse_include(self):
        """Parse an ``{% include %}`` tag, honoring the optional
        ``ignore missing`` modifier; includes run with context by default.
        """
        node = nodes.Include(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        if self.stream.current.test('name:ignore') and \
           self.stream.look().test('name:missing'):
            node.ignore_missing = True
            self.stream.skip(2)
        else:
            node.ignore_missing = False
        # includes default to 'with context'
        return self.parse_import_context(node, True)
    def parse_import(self):
        """Parse an ``{% import ... as name %}`` tag; imports default to
        'without context'.
        """
        node = nodes.Import(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        self.stream.expect('name:as')
        node.target = self.parse_assign_target(name_only=True).name
        return self.parse_import_context(node, False)
    def parse_from(self):
        """Parse a ``{% from ... import ... %}`` tag.

        ``node.names`` collects plain names and ``(name, alias)`` pairs;
        a trailing ``with``/``without context`` modifier may appear after
        the import list.
        """
        node = nodes.FromImport(lineno=next(self.stream).lineno)
        node.template = self.parse_expression()
        self.stream.expect('name:import')
        node.names = []
        def parse_context():
            # returns True (and consumes tokens) if a context modifier
            # terminates the import list here
            if self.stream.current.value in ('with', 'without') and \
               self.stream.look().test('name:context'):
                node.with_context = next(self.stream).value == 'with'
                self.stream.skip()
                return True
            return False
        while 1:
            if node.names:
                self.stream.expect('comma')
            if self.stream.current.type == 'name':
                if parse_context():
                    break
                target = self.parse_assign_target(name_only=True)
                # leading-underscore names are considered private in Jinja
                if target.name.startswith('_'):
                    self.fail('names starting with an underline can not '
                              'be imported', target.lineno,
                              exc=TemplateAssertionError)
                if self.stream.skip_if('name:as'):
                    alias = self.parse_assign_target(name_only=True)
                    node.names.append((target.name, alias.name))
                else:
                    node.names.append(target.name)
                if parse_context() or self.stream.current.type != 'comma':
                    break
            else:
                break
        if not hasattr(node, 'with_context'):
            # no modifier seen: 'from import' defaults to without context
            node.with_context = False
            self.stream.skip_if('comma')
        return node
    def parse_signature(self, node):
        """Parse a parenthesized macro/call-block signature into
        ``node.args`` and ``node.defaults``.

        Defaults are positional: they align with the trailing arguments.
        """
        node.args = args = []
        node.defaults = defaults = []
        self.stream.expect('lparen')
        while self.stream.current.type != 'rparen':
            if args:
                self.stream.expect('comma')
            arg = self.parse_assign_target(name_only=True)
            # parameters get 'param' context instead of the usual 'store'
            arg.set_ctx('param')
            if self.stream.skip_if('assign'):
                defaults.append(self.parse_expression())
            args.append(arg)
        self.stream.expect('rparen')
    def parse_call_block(self):
        """Parse a ``{% call %}`` block, which may carry an optional
        signature of its own: ``{% call(x) macro(...) %}``.
        """
        node = nodes.CallBlock(lineno=next(self.stream).lineno)
        if self.stream.current.type == 'lparen':
            self.parse_signature(node)
        else:
            node.args = []
            node.defaults = []
        node.call = self.parse_expression()
        # the expression after 'call' must actually be a call
        if not isinstance(node.call, nodes.Call):
            self.fail('expected call', node.lineno)
        node.body = self.parse_statements(('name:endcall',), drop_needle=True)
        return node
    def parse_filter_block(self):
        """Parse a ``{% filter %}`` block; the filter chain starts inline
        (no leading pipe token).
        """
        node = nodes.FilterBlock(lineno=next(self.stream).lineno)
        node.filter = self.parse_filter(None, start_inline=True)
        node.body = self.parse_statements(('name:endfilter',),
                                          drop_needle=True)
        return node
    def parse_macro(self):
        """Parse a ``{% macro name(args) %}`` definition."""
        node = nodes.Macro(lineno=next(self.stream).lineno)
        node.name = self.parse_assign_target(name_only=True).name
        self.parse_signature(node)
        node.body = self.parse_statements(('name:endmacro',),
                                          drop_needle=True)
        return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
    def parse_assign_target(self, with_tuple=True, name_only=False,
                            extra_end_rules=None):
        """Parse an assignment target. As Jinja2 allows assignments to
        tuples, this function can parse all allowed assignment targets. Per
        default assignments to tuples are parsed, that can be disable however
        by setting `with_tuple` to `False`. If only assignments to names are
        wanted `name_only` can be set to `True`. The `extra_end_rules`
        parameter is forwarded to the tuple parsing function.
        """
        if name_only:
            token = self.stream.expect('name')
            target = nodes.Name(token.value, 'store', lineno=token.lineno)
        else:
            if with_tuple:
                target = self.parse_tuple(simplified=True,
                                          extra_end_rules=extra_end_rules)
            else:
                target = self.parse_primary()
            # mark the parsed expression as a store target
            target.set_ctx('store')
        # not every expression is assignable (e.g. literals are not)
        if not target.can_assign():
            self.fail('can\'t assign to %r' % target.__class__.
                      __name__.lower(), target.lineno)
        return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
    def parse_condexpr(self):
        """Parse a conditional expression (``a if cond else b``); the
        ``else`` part is optional and nests recursively.
        """
        lineno = self.stream.current.lineno
        expr1 = self.parse_or()
        while self.stream.skip_if('name:if'):
            expr2 = self.parse_or()
            if self.stream.skip_if('name:else'):
                expr3 = self.parse_condexpr()
            else:
                # 'a if cond' with no else yields Undefined at runtime
                expr3 = None
            # CondExpr(test, true_value, false_value)
            expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
            lineno = self.stream.current.lineno
        return expr1
    def parse_or(self):
        """Parse a left-associative chain of ``or`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_and()
        while self.stream.skip_if('name:or'):
            right = self.parse_and()
            left = nodes.Or(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_and(self):
        """Parse a left-associative chain of ``and`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_not()
        while self.stream.skip_if('name:and'):
            right = self.parse_not()
            left = nodes.And(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
    def parse_compare(self):
        """Parse a (possibly chained) comparison: ``a < b <= c``,
        ``x in y``, ``x not in y``.  Operands accumulate into one
        :class:`nodes.Compare` node.
        """
        lineno = self.stream.current.lineno
        expr = self.parse_add()
        ops = []
        while 1:
            token_type = self.stream.current.type
            if token_type in _compare_operators:
                next(self.stream)
                ops.append(nodes.Operand(token_type, self.parse_add()))
            elif self.stream.skip_if('name:in'):
                ops.append(nodes.Operand('in', self.parse_add()))
            elif self.stream.current.test('name:not') and \
                 self.stream.look().test('name:in'):
                # 'not in' is two tokens; consume both
                self.stream.skip(2)
                ops.append(nodes.Operand('notin', self.parse_add()))
            else:
                break
            lineno = self.stream.current.lineno
        # plain expression if no comparison operator followed
        if not ops:
            return expr
        return nodes.Compare(expr, ops, lineno=lineno)
    def parse_add(self):
        """Parse a left-associative chain of ``+`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_sub()
        while self.stream.current.type == 'add':
            next(self.stream)
            right = self.parse_sub()
            left = nodes.Add(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_sub(self):
        """Parse a left-associative chain of ``-`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_concat()
        while self.stream.current.type == 'sub':
            next(self.stream)
            right = self.parse_concat()
            left = nodes.Sub(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_concat(self):
        """Parse a ``~`` string-concatenation chain; all operands collapse
        into one :class:`nodes.Concat` node rather than nesting pairwise.
        """
        lineno = self.stream.current.lineno
        args = [self.parse_mul()]
        while self.stream.current.type == 'tilde':
            next(self.stream)
            args.append(self.parse_mul())
        # no tilde seen: return the single operand unchanged
        if len(args) == 1:
            return args[0]
        return nodes.Concat(args, lineno=lineno)
    def parse_mul(self):
        """Parse a left-associative chain of ``*`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_div()
        while self.stream.current.type == 'mul':
            next(self.stream)
            right = self.parse_div()
            left = nodes.Mul(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_div(self):
        """Parse a left-associative chain of ``/`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_floordiv()
        while self.stream.current.type == 'div':
            next(self.stream)
            right = self.parse_floordiv()
            left = nodes.Div(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_floordiv(self):
        """Parse a left-associative chain of ``//`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_mod()
        while self.stream.current.type == 'floordiv':
            next(self.stream)
            right = self.parse_mod()
            left = nodes.FloorDiv(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_mod(self):
        """Parse a left-associative chain of ``%`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_pow()
        while self.stream.current.type == 'mod':
            next(self.stream)
            right = self.parse_pow()
            left = nodes.Mod(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_pow(self):
        """Parse a chain of ``**`` expressions."""
        lineno = self.stream.current.lineno
        left = self.parse_unary()
        while self.stream.current.type == 'pow':
            next(self.stream)
            right = self.parse_unary()
            left = nodes.Pow(left, right, lineno=lineno)
            lineno = self.stream.current.lineno
        return left
    def parse_unary(self, with_filter=True):
        """Parse unary ``-``/``+`` prefixes, then postfix operations and
        (optionally) trailing filters/tests.

        :param with_filter: set to ``False`` when the caller handles
            filters itself (nested unary operands).
        """
        token_type = self.stream.current.type
        lineno = self.stream.current.lineno
        if token_type == 'sub':
            next(self.stream)
            # inner unary is parsed without filters; they bind looser
            node = nodes.Neg(self.parse_unary(False), lineno=lineno)
        elif token_type == 'add':
            next(self.stream)
            node = nodes.Pos(self.parse_unary(False), lineno=lineno)
        else:
            node = self.parse_primary()
        node = self.parse_postfix(node)
        if with_filter:
            node = self.parse_filter_expr(node)
        return node
    def parse_primary(self):
        """Parse a primary expression: name, literal constant, string,
        number, parenthesized tuple, list or dict.
        """
        token = self.stream.current
        if token.type == 'name':
            # both lowercase and capitalized spellings are accepted
            if token.value in ('true', 'false', 'True', 'False'):
                node = nodes.Const(token.value in ('true', 'True'),
                                   lineno=token.lineno)
            elif token.value in ('none', 'None'):
                node = nodes.Const(None, lineno=token.lineno)
            else:
                node = nodes.Name(token.value, 'load', lineno=token.lineno)
            next(self.stream)
        elif token.type == 'string':
            next(self.stream)
            # adjacent string literals concatenate, like in Python
            buf = [token.value]
            lineno = token.lineno
            while self.stream.current.type == 'string':
                buf.append(self.stream.current.value)
                next(self.stream)
            node = nodes.Const(''.join(buf), lineno=lineno)
        elif token.type in ('integer', 'float'):
            next(self.stream)
            node = nodes.Const(token.value, lineno=token.lineno)
        elif token.type == 'lparen':
            next(self.stream)
            node = self.parse_tuple(explicit_parentheses=True)
            self.stream.expect('rparen')
        elif token.type == 'lbracket':
            node = self.parse_list()
        elif token.type == 'lbrace':
            node = self.parse_dict()
        else:
            self.fail("unexpected '%s'" % describe_token(token), token.lineno)
        return node
    def parse_tuple(self, simplified=False, with_condexpr=True,
                    extra_end_rules=None, explicit_parentheses=False):
        """Works like `parse_expression` but if multiple expressions are
        delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
        This method could also return a regular expression instead of a tuple
        if no commas where found.
        The default parsing mode is a full tuple. If `simplified` is `True`
        only names and literals are parsed. The `no_condexpr` parameter is
        forwarded to :meth:`parse_expression`.
        Because tuples do not require delimiters and may end in a bogus comma
        an extra hint is needed that marks the end of a tuple. For example
        for loops support tuples between `for` and `in`. In that case the
        `extra_end_rules` is set to ``['name:in']``.
        `explicit_parentheses` is true if the parsing was triggered by an
        expression in parentheses. This is used to figure out if an empty
        tuple is a valid expression or not.
        """
        lineno = self.stream.current.lineno
        # pick the element parser based on the requested restrictions
        if simplified:
            parse = self.parse_primary
        elif with_condexpr:
            parse = self.parse_expression
        else:
            parse = lambda: self.parse_expression(with_condexpr=False)
        args = []
        is_tuple = False
        while 1:
            if args:
                self.stream.expect('comma')
            if self.is_tuple_end(extra_end_rules):
                break
            args.append(parse())
            if self.stream.current.type == 'comma':
                # a trailing comma forces tuple-ness even for one element
                is_tuple = True
            else:
                break
            lineno = self.stream.current.lineno
        if not is_tuple:
            if args:
                return args[0]
            # if we don't have explicit parentheses, an empty tuple is
            # not a valid expression. This would mean nothing (literally
            # nothing) in the spot of an expression would be an empty
            # tuple.
            if not explicit_parentheses:
                self.fail('Expected an expression, got \'%s\'' %
                          describe_token(self.stream.current))
        return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
    def parse_dict(self):
        """Parse a dict literal ``{k: v, ...}``, allowing a trailing comma."""
        token = self.stream.expect('lbrace')
        items = []
        while self.stream.current.type != 'rbrace':
            if items:
                self.stream.expect('comma')
            # a trailing comma leaves us right before the closing brace
            if self.stream.current.type == 'rbrace':
                break
            key = self.parse_expression()
            self.stream.expect('colon')
            value = self.parse_expression()
            items.append(nodes.Pair(key, value, lineno=key.lineno))
        self.stream.expect('rbrace')
        return nodes.Dict(items, lineno=token.lineno)
    def parse_postfix(self, node):
        """Wrap *node* with any number of trailing attribute accesses,
        subscripts and calls.
        """
        while 1:
            token_type = self.stream.current.type
            if token_type == 'dot' or token_type == 'lbracket':
                node = self.parse_subscript(node)
            # calls are valid both after postfix expressions (getattr
            # and getitem) as well as filters and tests
            elif token_type == 'lparen':
                node = self.parse_call(node)
            else:
                break
        return node
    def parse_filter_expr(self, node):
        """Wrap *node* with any number of trailing filters (``|name``),
        tests (``is name``) and calls.
        """
        while 1:
            token_type = self.stream.current.type
            if token_type == 'pipe':
                node = self.parse_filter(node)
            elif token_type == 'name' and self.stream.current.value == 'is':
                node = self.parse_test(node)
            # calls are valid both after postfix expressions (getattr
            # and getitem) as well as filters and tests
            elif token_type == 'lparen':
                node = self.parse_call(node)
            else:
                break
        return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', self.lineno)
    def parse_subscribed(self):
        """Parse one element inside ``[...]``: either a plain expression
        or a slice (``start:stop:step`` with every part optional); slices
        accumulate their parts (or ``None`` placeholders) into ``args``.
        """
        lineno = self.stream.current.lineno
        if self.stream.current.type == 'colon':
            # slice with omitted start, e.g. [:stop]
            next(self.stream)
            args = [None]
        else:
            node = self.parse_expression()
            if self.stream.current.type != 'colon':
                # no colon: a plain subscript expression, not a slice
                return node
            next(self.stream)
            args = [node]
        # second slice part (stop)
        if self.stream.current.type == 'colon':
            args.append(None)
        elif self.stream.current.type not in ('rbracket', 'comma'):
            args.append(self.parse_expression())
        else:
            args.append(None)
        # optional third slice part (step)
        if self.stream.current.type == 'colon':
            next(self.stream)
            if self.stream.current.type not in ('rbracket', 'comma'):
                args.append(self.parse_expression())
            else:
                args.append(None)
        else:
            args.append(None)
        return nodes.Slice(lineno=lineno, *args)
    def parse_call(self, node):
        """Parse a call argument list ``(...)``.

        Returns a :class:`nodes.Call` wrapping *node*, or -- when *node*
        is ``None`` (filters/tests parse their own arguments) -- the raw
        ``(args, kwargs, dyn_args, dyn_kwargs)`` tuple.
        """
        token = self.stream.expect('lparen')
        args = []
        kwargs = []
        dyn_args = dyn_kwargs = None
        require_comma = False
        def ensure(expr):
            # argument-ordering invariants (e.g. no positional after **kw)
            if not expr:
                self.fail('invalid syntax for function call expression',
                          token.lineno)
        while self.stream.current.type != 'rparen':
            if require_comma:
                self.stream.expect('comma')
                # support for trailing comma
                if self.stream.current.type == 'rparen':
                    break
            if self.stream.current.type == 'mul':
                # *args must come before **kwargs and appear only once
                ensure(dyn_args is None and dyn_kwargs is None)
                next(self.stream)
                dyn_args = self.parse_expression()
            elif self.stream.current.type == 'pow':
                ensure(dyn_kwargs is None)
                next(self.stream)
                dyn_kwargs = self.parse_expression()
            else:
                ensure(dyn_args is None and dyn_kwargs is None)
                if self.stream.current.type == 'name' and \
                   self.stream.look().type == 'assign':
                    # keyword argument: name=expr
                    key = self.stream.current.value
                    self.stream.skip(2)
                    value = self.parse_expression()
                    kwargs.append(nodes.Keyword(key, value,
                                                lineno=value.lineno))
                else:
                    # positional arguments may not follow keyword ones
                    ensure(not kwargs)
                    args.append(self.parse_expression())
            require_comma = True
        self.stream.expect('rparen')
        if node is None:
            return args, kwargs, dyn_args, dyn_kwargs
        return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
                          lineno=token.lineno)
    def parse_filter(self, node, start_inline=False):
        """Parse a chain of filters applied to *node*.

        :param start_inline: when ``True`` the first filter name follows
            immediately (no leading ``|``), as in ``{% filter name %}``.
        """
        while self.stream.current.type == 'pipe' or start_inline:
            if not start_inline:
                next(self.stream)
            token = self.stream.expect('name')
            name = token.value
            # dotted filter names (e.g. namespaced filters) are allowed
            while self.stream.current.type == 'dot':
                next(self.stream)
                name += '.' + self.stream.expect('name').value
            if self.stream.current.type == 'lparen':
                args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
            else:
                args = []
                kwargs = []
                dyn_args = dyn_kwargs = None
            node = nodes.Filter(node, name, args, kwargs, dyn_args,
                                dyn_kwargs, lineno=token.lineno)
            # only the first iteration may start inline
            start_inline = False
        return node
    def parse_test(self, node):
        """Parse an ``is`` test applied to *node*, including the negated
        form ``is not`` and an optional single bare argument
        (e.g. ``x is divisibleby 3``).
        """
        token = next(self.stream)
        if self.stream.current.test('name:not'):
            next(self.stream)
            negated = True
        else:
            negated = False
        name = self.stream.expect('name').value
        # dotted test names are allowed, like dotted filter names
        while self.stream.current.type == 'dot':
            next(self.stream)
            name += '.' + self.stream.expect('name').value
        dyn_args = dyn_kwargs = None
        kwargs = []
        if self.stream.current.type == 'lparen':
            args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
        elif self.stream.current.type in ('name', 'string', 'integer',
                                          'float', 'lparen', 'lbracket',
                                          'lbrace') and not \
                self.stream.current.test_any('name:else', 'name:or',
                                             'name:and'):
            # a bare argument follows the test name without parentheses
            if self.stream.current.test('name:is'):
                self.fail('You cannot chain multiple tests with is')
            args = [self.parse_expression()]
        else:
            args = []
        node = nodes.Test(node, name, args, kwargs, dyn_args,
                          dyn_kwargs, lineno=token.lineno)
        # negation wraps the whole test node
        if negated:
            node = nodes.Not(node, lineno=token.lineno)
        return node
    def subparse(self, end_tokens=None):
        """Parse template body nodes until EOF or one of *end_tokens*.

        Template data and ``{{ ... }}`` outputs are buffered and flushed
        into a single :class:`nodes.Output` whenever a ``{% ... %}`` block
        begins, keeping consecutive output pieces together.
        """
        body = []
        data_buffer = []
        add_data = data_buffer.append
        # register end tokens so nested failures can report expectations
        if end_tokens is not None:
            self._end_token_stack.append(end_tokens)
        def flush_data():
            # turn the buffered data/expressions into one Output node
            if data_buffer:
                lineno = data_buffer[0].lineno
                body.append(nodes.Output(data_buffer[:], lineno=lineno))
                del data_buffer[:]
        try:
            while self.stream:
                token = self.stream.current
                if token.type == 'data':
                    if token.value:
                        add_data(nodes.TemplateData(token.value,
                                                    lineno=token.lineno))
                    next(self.stream)
                elif token.type == 'variable_begin':
                    next(self.stream)
                    add_data(self.parse_tuple(with_condexpr=True))
                    self.stream.expect('variable_end')
                elif token.type == 'block_begin':
                    flush_data()
                    next(self.stream)
                    # an end token here means our caller's block is done
                    if end_tokens is not None and \
                       self.stream.current.test_any(*end_tokens):
                        return body
                    rv = self.parse_statement()
                    if isinstance(rv, list):
                        body.extend(rv)
                    else:
                        body.append(rv)
                    self.stream.expect('block_end')
                else:
                    raise AssertionError('internal parsing error')
            flush_data()
        finally:
            # always balance the push above, even on error paths
            if end_tokens is not None:
                self._end_token_stack.pop()
        return body
    def parse(self):
        """Parse the whole template into a `Template` node."""
        result = nodes.Template(self.subparse(), lineno=1)
        # nodes need the environment for later compilation passes
        result.set_environment(self.environment)
        return result
| |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Response parsers for the various protocol types.
The module contains classes that can take an HTTP response, and given
an output shape, parse the response into a dict according to the
rules in the output shape.
There are many similarities amongst the different protocols with regard
to response parsing, and the code is structured in a way to avoid
code duplication when possible. The diagram below is a diagram
showing the inheritance hierarchy of the response classes.
::
+--------------+
|ResponseParser|
+--------------+
^ ^ ^
+--------------------+ | +-------------------+
| | |
+----------+----------+ +------+-------+ +-------+------+
|BaseXMLResponseParser| |BaseRestParser| |BaseJSONParser|
+---------------------+ +--------------+ +--------------+
^ ^ ^ ^ ^ ^
| | | | | |
| | | | | |
| ++----------+-+ +-+-----------++ |
| |RestXMLParser| |RestJSONParser| |
+-----+-----+ +-------------+ +--------------+ +----+-----+
|QueryParser| |JSONParser|
+-----------+ +----------+
The diagram above shows that there is a base class, ``ResponseParser`` that
contains logic that is similar amongst all the different protocols (``query``,
``json``, ``rest-json``, ``rest-xml``). Amongst the various services there
is shared logic that can be grouped several ways:
* The ``query`` and ``rest-xml`` both have XML bodies that are parsed in the
same way.
* The ``json`` and ``rest-json`` protocols both have JSON bodies that are
parsed in the same way.
* The ``rest-json`` and ``rest-xml`` protocols have additional attributes
besides body parameters that are parsed the same (headers, query string,
status code).
This is reflected in the class diagram above. The ``BaseXMLResponseParser``
and the BaseJSONParser contain logic for parsing the XML/JSON body,
and the BaseRestParser contains logic for parsing out attributes that
come from other parts of the HTTP response. Classes like the
``RestXMLParser`` inherit from the ``BaseXMLResponseParser`` to get the
XML body parsing logic and the ``BaseRestParser`` to get the HTTP
header/status code/query string parsing.
Additionally, there are event stream parsers that are used by the other parsers
to wrap streaming bodies that represent a stream of events. The
BaseEventStreamParser extends from ResponseParser and defines the logic for
parsing values from the headers and payload of a message from the underlying
binary encoding protocol. Currently, event streams support parsing bodies
encoded as JSON and XML through the following hierarchy.
+--------------+
|ResponseParser|
+--------------+
^ ^ ^
+--------------------+ | +------------------+
| | |
+----------+----------+ +----------+----------+ +-------+------+
|BaseXMLResponseParser| |BaseEventStreamParser| |BaseJSONParser|
+---------------------+ +---------------------+ +--------------+
^ ^ ^ ^
| | | |
| | | |
+-+----------------+-+ +-+-----------------+-+
|EventStreamXMLParser| |EventStreamJSONParser|
+--------------------+ +---------------------+
Return Values
=============
Each call to ``parse()`` returns a dict that has this form::
Standard Response
{
"ResponseMetadata": {"RequestId": <requestid>}
<response keys>
}
Error response
{
"ResponseMetadata": {"RequestId": <requestid>}
"Error": {
"Code": <string>,
"Message": <string>,
"Type": <string>,
<additional keys>
}
}
"""
import re
import base64
import json
import logging
from botocore.compat import six, ETree, XMLParseError
from botocore.eventstream import EventStream, NoInitialResponseError
from botocore.utils import parse_timestamp, merge_dicts, \
is_json_value_header, lowercase_dict
LOG = logging.getLogger(__name__)
DEFAULT_TIMESTAMP_PARSER = parse_timestamp
class ResponseParserFactory(object):
    """Factory that builds protocol-specific response parsers, applying
    any defaults previously registered via :meth:`set_parser_defaults`.
    """

    def __init__(self):
        # kwargs forwarded to every parser this factory creates
        self._defaults = {}

    def set_parser_defaults(self, **kwargs):
        """Set default arguments when a parser instance is created.
        You can specify any kwargs that are allowed by a ResponseParser
        class. There are currently two arguments:
        * timestamp_parser - A callable that can parse a timestamp string
        * blob_parser - A callable that can parse a blob type
        """
        self._defaults.update(kwargs)

    def create_parser(self, protocol_name):
        """Return a new parser instance for *protocol_name*."""
        return PROTOCOL_PARSERS[protocol_name](**self._defaults)
def create_parser(protocol):
    """Module-level convenience: build a parser for *protocol* using a
    fresh factory with no default overrides.
    """
    return ResponseParserFactory().create_parser(protocol)
def _text_content(func):
# This decorator hides the difference between
# an XML node with text or a plain string. It's used
# to ensure that scalar processing operates only on text
# strings, which allows the same scalar handlers to be used
# for XML nodes from the body and HTTP headers.
def _get_text_content(self, shape, node_or_string):
if hasattr(node_or_string, 'text'):
text = node_or_string.text
if text is None:
# If an XML node is empty <foo></foo>,
# we want to parse that as an empty string,
# not as a null/None value.
text = ''
else:
text = node_or_string
return func(self, shape, text)
return _get_text_content
class ResponseParserError(Exception):
    """Base exception for errors encountered while parsing a response."""
    pass
class ResponseParser(object):
"""Base class for response parsing.
This class represents the interface that all ResponseParsers for the
various protocols must implement.
This class will take an HTTP response and a model shape and parse the
HTTP response into a dictionary.
There is a single public method exposed: ``parse``. See the ``parse``
docstring for more info.
"""
DEFAULT_ENCODING = 'utf-8'
EVENT_STREAM_PARSER_CLS = None
def __init__(self, timestamp_parser=None, blob_parser=None):
if timestamp_parser is None:
timestamp_parser = DEFAULT_TIMESTAMP_PARSER
self._timestamp_parser = timestamp_parser
if blob_parser is None:
blob_parser = self._default_blob_parser
self._blob_parser = blob_parser
self._event_stream_parser = None
if self.EVENT_STREAM_PARSER_CLS is not None:
self._event_stream_parser = self.EVENT_STREAM_PARSER_CLS(
timestamp_parser, blob_parser)
def _default_blob_parser(self, value):
# Blobs are always returned as bytes type (this matters on python3).
# We don't decode this to a str because it's entirely possible that the
# blob contains binary data that actually can't be decoded.
return base64.b64decode(value)
def parse(self, response, shape):
"""Parse the HTTP response given a shape.
:param response: The HTTP response dictionary. This is a dictionary
that represents the HTTP request. The dictionary must have the
following keys, ``body``, ``headers``, and ``status_code``.
:param shape: The model shape describing the expected output.
:return: Returns a dictionary representing the parsed response
described by the model. In addition to the shape described from
the model, each response will also have a ``ResponseMetadata``
which contains metadata about the response, which contains at least
two keys containing ``RequestId`` and ``HTTPStatusCode``. Some
responses may populate additional keys, but ``RequestId`` will
always be present.
"""
LOG.debug('Response headers: %s', response['headers'])
LOG.debug('Response body:\n%s', response['body'])
if response['status_code'] >= 301:
if self._is_generic_error_response(response):
parsed = self._do_generic_error_parse(response)
elif self._is_modeled_error_shape(shape):
parsed = self._do_modeled_error_parse(response, shape)
# We don't want to decorate the modeled fields with metadata
return parsed
else:
parsed = self._do_error_parse(response, shape)
else:
parsed = self._do_parse(response, shape)
# We don't want to decorate event stream responses with metadata
if shape and shape.serialization.get('eventstream'):
return parsed
# Add ResponseMetadata if it doesn't exist and inject the HTTP
# status code and headers from the response.
if isinstance(parsed, dict):
response_metadata = parsed.get('ResponseMetadata', {})
response_metadata['HTTPStatusCode'] = response['status_code']
# Ensure that the http header keys are all lower cased. Older
# versions of urllib3 (< 1.11) would unintentionally do this for us
# (see urllib3#633). We need to do this conversion manually now.
headers = response['headers']
response_metadata['HTTPHeaders'] = lowercase_dict(headers)
parsed['ResponseMetadata'] = response_metadata
return parsed
def _is_modeled_error_shape(self, shape):
return shape is not None and shape.metadata.get('exception', False)
def _is_generic_error_response(self, response):
# There are times when a service will respond with a generic
# error response such as:
# '<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
#
# This can also happen if you're going through a proxy.
# In this case the protocol specific _do_error_parse will either
# fail to parse the response (in the best case) or silently succeed
# and treat the HTML above as an XML response and return
# non sensical parsed data.
# To prevent this case from happening we first need to check
# whether or not this response looks like the generic response.
if response['status_code'] >= 500:
if 'body' not in response or response['body'] is None:
return True
body = response['body'].strip()
return body.startswith(b'<html>') or not body
    def _do_generic_error_parse(self, response):
        """Build a best-effort error dict from a non-protocol (e.g. HTML) response.

        Falls back to the HTTP status code for 'Code' and the standard
        reason phrase for 'Message', since the body carries no usable data.
        """
        # There's not really much we can do when we get a generic
        # html response.
        LOG.debug("Received a non protocol specific error response from the "
                  "service, unable to populate error code and message.")
        return {
            'Error': {'Code': str(response['status_code']),
                      'Message': six.moves.http_client.responses.get(
                          response['status_code'], '')},
            'ResponseMetadata': {},
        }
    def _do_parse(self, response, shape):
        """Parse a successful response; protocol subclasses must implement."""
        raise NotImplementedError("%s._do_parse" % self.__class__.__name__)
    def _do_error_parse(self, response, shape):
        """Parse an error response; protocol subclasses must implement."""
        raise NotImplementedError(
            "%s._do_error_parse" % self.__class__.__name__)
def _do_modeled_error_parse(self, response, shape, parsed):
raise NotImplementedError(
"%s._do_modeled_error_parse" % self.__class__.__name__)
def _parse_shape(self, shape, node):
handler = getattr(self, '_handle_%s' % shape.type_name,
self._default_handle)
return handler(shape, node)
def _handle_list(self, shape, node):
# Enough implementations share list serialization that it's moved
# up here in the base class.
parsed = []
member_shape = shape.member
for item in node:
parsed.append(self._parse_shape(member_shape, item))
return parsed
    def _default_handle(self, shape, value):
        """Fallback handler: pass the raw value through unchanged."""
        return value
    def _create_event_stream(self, response, shape):
        """Wrap the raw HTTP body in an EventStream for *shape*."""
        parser = self._event_stream_parser
        # Operation name is used by EventStream for error reporting.
        name = response['context'].get('operation_name')
        return EventStream(response['body'], shape, parser, name)
class BaseXMLResponseParser(ResponseParser):
    """Shared machinery for protocols whose responses are XML documents."""

    def __init__(self, timestamp_parser=None, blob_parser=None):
        super(BaseXMLResponseParser, self).__init__(timestamp_parser,
                                                    blob_parser)
        # Matches the '{namespace}' prefix ElementTree prepends to tag names.
        self._namespace_re = re.compile('{.*}')

    def _handle_map(self, shape, node):
        """Parse an XML map shape (<entry><key/><value/></entry>) into a dict."""
        parsed = {}
        key_shape = shape.key
        value_shape = shape.value
        key_location_name = key_shape.serialization.get('name') or 'key'
        value_location_name = value_shape.serialization.get('name') or 'value'
        # A flattened map with a single entry arrives as a bare node, not a
        # list of nodes.
        if shape.serialization.get('flattened') and not isinstance(node, list):
            node = [node]
        for keyval_node in node:
            for single_pair in keyval_node:
                # Within each <entry> there's a <key> and a <value>
                tag_name = self._node_tag(single_pair)
                if tag_name == key_location_name:
                    key_name = self._parse_shape(key_shape, single_pair)
                elif tag_name == value_location_name:
                    val_name = self._parse_shape(value_shape, single_pair)
                else:
                    raise ResponseParserError("Unknown tag: %s" % tag_name)
            parsed[key_name] = val_name
        return parsed

    def _node_tag(self, node):
        """Return *node*'s tag with any XML namespace prefix stripped."""
        return self._namespace_re.sub('', node.tag)

    def _handle_list(self, shape, node):
        # When we use _build_name_to_xml_node, repeated elements are aggregated
        # into a list. However, we can't tell the difference between a scalar
        # value and a single element flattened list. So before calling the
        # real _handle_list, we know that "node" should actually be a list if
        # it's flattened, and if it's not, then we make it a one element list.
        if shape.serialization.get('flattened') and not isinstance(node, list):
            node = [node]
        return super(BaseXMLResponseParser, self)._handle_list(shape, node)

    def _handle_structure(self, shape, node):
        """Parse an XML structure shape into a dict keyed by member name."""
        parsed = {}
        members = shape.members
        if shape.metadata.get('exception', False):
            # For modeled exceptions, descend to the nested <Error> node.
            node = self._get_error_root(node)
        xml_dict = self._build_name_to_xml_node(node)
        for member_name in members:
            member_shape = members[member_name]
            if 'location' in member_shape.serialization or \
                    member_shape.serialization.get('eventheader'):
                # All members with locations have already been handled,
                # so we don't need to parse these members.
                continue
            xml_name = self._member_key_name(member_shape, member_name)
            member_node = xml_dict.get(xml_name)
            if member_node is not None:
                parsed[member_name] = self._parse_shape(
                    member_shape, member_node)
            elif member_shape.serialization.get('xmlAttribute'):
                # Members bound to XML attributes are read from node.attrib,
                # normalizing namespaced keys to '<prefix>:<name>' form.
                attribs = {}
                location_name = member_shape.serialization['name']
                for key, value in node.attrib.items():
                    new_key = self._namespace_re.sub(
                        location_name.split(':')[0] + ':', key)
                    attribs[new_key] = value
                if location_name in attribs:
                    parsed[member_name] = attribs[location_name]
        return parsed

    def _get_error_root(self, original_root):
        """Return the <Error> child of an <ErrorResponse>, if present."""
        if self._node_tag(original_root) == 'ErrorResponse':
            for child in original_root:
                if self._node_tag(child) == 'Error':
                    return child
        return original_root

    def _member_key_name(self, shape, member_name):
        # This method is needed because we have to special case flattened list
        # with a serialization name. If this is the case we use the
        # locationName from the list's member shape as the key name for the
        # surrounding structure.
        if shape.type_name == 'list' and shape.serialization.get('flattened'):
            list_member_serialized_name = shape.member.serialization.get(
                'name')
            if list_member_serialized_name is not None:
                return list_member_serialized_name
        serialized_name = shape.serialization.get('name')
        if serialized_name is not None:
            return serialized_name
        return member_name

    def _build_name_to_xml_node(self, parent_node):
        """Map child tag name -> node (or list of nodes for repeated tags)."""
        # If the parent node is actually a list. We should not be trying
        # to serialize it to a dictionary. Instead, return the first element
        # in the list.
        if isinstance(parent_node, list):
            return self._build_name_to_xml_node(parent_node[0])
        xml_dict = {}
        for item in parent_node:
            key = self._node_tag(item)
            if key in xml_dict:
                # If the key already exists, the most natural
                # way to handle this is to aggregate repeated
                # keys into a single list.
                # <foo>1</foo><foo>2</foo> -> {'foo': [Node(1), Node(2)]}
                if isinstance(xml_dict[key], list):
                    xml_dict[key].append(item)
                else:
                    # Convert from a scalar to a list.
                    xml_dict[key] = [xml_dict[key], item]
            else:
                xml_dict[key] = item
        return xml_dict

    def _parse_xml_string_to_dom(self, xml_string):
        """Parse raw XML bytes into an ElementTree root.

        Raises ResponseParserError (not the raw XML error) so callers can
        treat malformed bodies uniformly.
        """
        try:
            parser = ETree.XMLParser(
                target=ETree.TreeBuilder(),
                encoding=self.DEFAULT_ENCODING)
            parser.feed(xml_string)
            root = parser.close()
        except XMLParseError as e:
            raise ResponseParserError(
                "Unable to parse response (%s), "
                "invalid XML received. Further retries may succeed:\n%s" %
                (e, xml_string))
        return root

    def _replace_nodes(self, parsed):
        """Recursively replace XML nodes in *parsed* with dicts/text values."""
        for key, value in parsed.items():
            # list(value) is truthy when the element has child elements.
            if list(value):
                sub_dict = self._build_name_to_xml_node(value)
                parsed[key] = self._replace_nodes(sub_dict)
            else:
                parsed[key] = value.text
        return parsed

    @_text_content
    def _handle_boolean(self, shape, text):
        # Only the literal string 'true' is truthy; anything else is False.
        if text == 'true':
            return True
        else:
            return False

    @_text_content
    def _handle_float(self, shape, text):
        return float(text)

    @_text_content
    def _handle_timestamp(self, shape, text):
        return self._timestamp_parser(text)

    @_text_content
    def _handle_integer(self, shape, text):
        return int(text)

    @_text_content
    def _handle_string(self, shape, text):
        return text

    @_text_content
    def _handle_blob(self, shape, text):
        return self._blob_parser(text)

    # Scalar types that share a wire representation reuse the same handlers.
    _handle_character = _handle_string
    _handle_double = _handle_float
    _handle_long = _handle_integer
class QueryParser(BaseXMLResponseParser):
    """Response parser for the AWS "query" protocol (XML bodies)."""

    def _do_error_parse(self, response, shape):
        """Parse a query-protocol error body into the standard error dict."""
        xml_contents = response['body']
        root = self._parse_xml_string_to_dom(xml_contents)
        parsed = self._build_name_to_xml_node(root)
        self._replace_nodes(parsed)
        # Once we've converted xml->dict, we need to make one or two
        # more adjustments to extract nested errors and to be consistent
        # with ResponseMetadata for non-error responses:
        # 1. {"Errors": {"Error": {...}}} -> {"Error": {...}}
        # 2. {"RequestId": "id"} -> {"ResponseMetadata": {"RequestId": "id"}}
        if 'Errors' in parsed:
            parsed.update(parsed.pop('Errors'))
        if 'RequestId' in parsed:
            parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
        return parsed

    def _do_modeled_error_parse(self, response, shape):
        # Modeled errors keep only the shape's fields; no metadata injected.
        return self._parse_body_as_xml(response, shape, inject_metadata=False)

    def _do_parse(self, response, shape):
        return self._parse_body_as_xml(response, shape, inject_metadata=True)

    def _parse_body_as_xml(self, response, shape, inject_metadata=True):
        """Parse the XML body against *shape*, unwrapping resultWrapper."""
        xml_contents = response['body']
        root = self._parse_xml_string_to_dom(xml_contents)
        parsed = {}
        if shape is not None:
            start = root
            if 'resultWrapper' in shape.serialization:
                start = self._find_result_wrapped_shape(
                    shape.serialization['resultWrapper'],
                    root)
            parsed = self._parse_shape(shape, start)
        if inject_metadata:
            self._inject_response_metadata(root, parsed)
        return parsed

    def _find_result_wrapped_shape(self, element_name, xml_root_node):
        """Return the child node named *element_name* under the XML root."""
        mapping = self._build_name_to_xml_node(xml_root_node)
        return mapping[element_name]

    def _inject_response_metadata(self, node, inject_into):
        """Copy the <ResponseMetadata> element's children into the result."""
        mapping = self._build_name_to_xml_node(node)
        child_node = mapping.get('ResponseMetadata')
        if child_node is not None:
            sub_mapping = self._build_name_to_xml_node(child_node)
            for key, value in sub_mapping.items():
                sub_mapping[key] = value.text
            inject_into['ResponseMetadata'] = sub_mapping
class EC2QueryParser(QueryParser):
    """Query-protocol parser with EC2's slightly different conventions."""

    def _inject_response_metadata(self, node, inject_into):
        # EC2 uses a bare <requestId> element rather than <ResponseMetadata>.
        mapping = self._build_name_to_xml_node(node)
        child_node = mapping.get('requestId')
        if child_node is not None:
            inject_into['ResponseMetadata'] = {'RequestId': child_node.text}

    def _do_error_parse(self, response, shape):
        # EC2 errors look like:
        # <Response>
        #   <Errors>
        #     <Error>
        #       <Code>InvalidInstanceID.Malformed</Code>
        #       <Message>Invalid id: "1343124"</Message>
        #     </Error>
        #   </Errors>
        #   <RequestID>12345</RequestID>
        # </Response>
        # This is different from QueryParser in that it's RequestID,
        # not RequestId
        original = super(EC2QueryParser, self)._do_error_parse(response, shape)
        if 'RequestID' in original:
            original['ResponseMetadata'] = {
                'RequestId': original.pop('RequestID')
            }
        return original

    def _get_error_root(self, original_root):
        # Descend <Response>/<Errors>/<Error>; fall back to the root.
        for child in original_root:
            if self._node_tag(child) == 'Errors':
                for errors_child in child:
                    if self._node_tag(errors_child) == 'Error':
                        return errors_child
        return original_root
class BaseJSONParser(ResponseParser):
    """Shared machinery for protocols whose responses are JSON documents."""

    def _handle_structure(self, shape, value):
        """Parse a JSON object against a structure shape, remapping names."""
        final_parsed = {}
        if shape.is_document_type:
            # Document types are returned verbatim, no member traversal.
            final_parsed = value
        else:
            member_shapes = shape.members
            if value is None:
                # If the value comes across the wire as "null" (None in
                # python), we should be returning this unchanged, instead
                # of as an empty dict.
                return None
            final_parsed = {}
            for member_name in member_shapes:
                member_shape = member_shapes[member_name]
                json_name = member_shape.serialization.get('name', member_name)
                raw_value = value.get(json_name)
                if raw_value is not None:
                    final_parsed[member_name] = self._parse_shape(
                        member_shapes[member_name],
                        raw_value)
        return final_parsed

    def _handle_map(self, shape, value):
        """Parse a JSON object as a map, converting both keys and values."""
        parsed = {}
        key_shape = shape.key
        value_shape = shape.value
        # NOTE(review): the loop variable deliberately shadows the *value*
        # argument; .items() has already captured the dict being iterated.
        for key, value in value.items():
            actual_key = self._parse_shape(key_shape, key)
            actual_value = self._parse_shape(value_shape, value)
            parsed[actual_key] = actual_value
        return parsed

    def _handle_blob(self, shape, value):
        return self._blob_parser(value)

    def _handle_timestamp(self, shape, value):
        return self._timestamp_parser(value)

    def _do_error_parse(self, response, shape):
        """Build the standard error dict from a JSON error body."""
        body = self._parse_body_as_json(response['body'])
        error = {"Error": {"Message": '', "Code": ''}, "ResponseMetadata": {}}
        # Error responses can have slightly different structures for json.
        # The basic structure is:
        #
        # {"__type":"ConnectClientException",
        #  "message":"The error message."}
        # The error message can either come in the 'message' or 'Message' key
        # so we need to check for both.
        error['Error']['Message'] = body.get('message',
                                             body.get('Message', ''))
        # if the message did not contain an error code
        # include the response status code
        response_code = response.get('status_code')
        code = body.get('__type', response_code and str(response_code))
        if code is not None:
            # code has a couple forms as well:
            # * "com.aws.dynamodb.vAPI#ProvisionedThroughputExceededException"
            # * "ResourceNotFoundException"
            if '#' in code:
                code = code.rsplit('#', 1)[1]
            error['Error']['Code'] = code
        self._inject_response_metadata(error, response['headers'])
        return error

    def _inject_response_metadata(self, parsed, headers):
        """Record the request id from headers into ResponseMetadata."""
        if 'x-amzn-requestid' in headers:
            parsed.setdefault('ResponseMetadata', {})['RequestId'] = (
                headers['x-amzn-requestid'])

    def _parse_body_as_json(self, body_contents):
        """Decode and json-parse the body; empty body parses to {}."""
        if not body_contents:
            return {}
        body = body_contents.decode(self.DEFAULT_ENCODING)
        try:
            original_parsed = json.loads(body)
            return original_parsed
        except ValueError:
            # if the body cannot be parsed, include
            # the literal string as the message
            return { 'message': body }
class BaseEventStreamParser(ResponseParser):
    """Shared parsing for individual events within an event stream."""

    def _do_parse(self, response, shape):
        """Parse one event; dispatches on the ':event-type' header."""
        final_parsed = {}
        if shape.serialization.get('eventstream'):
            event_type = response['headers'].get(':event-type')
            event_shape = shape.members.get(event_type)
            if event_shape:
                # Recurse with the concrete event's shape.
                final_parsed[event_type] = self._do_parse(response, event_shape)
        else:
            self._parse_non_payload_attrs(response, shape,
                                          shape.members, final_parsed)
            self._parse_payload(response, shape, shape.members, final_parsed)
        return final_parsed

    def _do_error_parse(self, response, shape):
        """Build an error dict from an exception event or error headers."""
        exception_type = response['headers'].get(':exception-type')
        exception_shape = shape.members.get(exception_type)
        if exception_shape is not None:
            # Modeled exception: parse the body against its shape.
            original_parsed = self._initial_body_parse(response['body'])
            body = self._parse_shape(exception_shape, original_parsed)
            error = {
                'Error': {
                    'Code': exception_type,
                    'Message': body.get('Message', body.get('message', ''))
                }
            }
        else:
            # Unmodeled error: code/message come from stream-level headers.
            error = {
                'Error': {
                    'Code': response['headers'].get(':error-code', ''),
                    'Message': response['headers'].get(':error-message', ''),
                }
            }
        return error

    def _parse_payload(self, response, shape, member_shapes, final_parsed):
        """Parse the event payload, honoring an explicit eventpayload member."""
        if shape.serialization.get('event'):
            for name in member_shapes:
                member_shape = member_shapes[name]
                if member_shape.serialization.get('eventpayload'):
                    body = response['body']
                    if member_shape.type_name == 'blob':
                        parsed_body = body
                    elif member_shape.type_name == 'string':
                        parsed_body = body.decode(self.DEFAULT_ENCODING)
                    else:
                        raw_parse = self._initial_body_parse(body)
                        parsed_body = self._parse_shape(member_shape, raw_parse)
                    final_parsed[name] = parsed_body
                    return
            # If we didn't find an explicit payload, use the current shape
            original_parsed = self._initial_body_parse(response['body'])
            body_parsed = self._parse_shape(shape, original_parsed)
            final_parsed.update(body_parsed)

    def _parse_non_payload_attrs(self, response, shape,
                                 member_shapes, final_parsed):
        """Fill members marked 'eventheader' from the event's headers."""
        headers = response['headers']
        for name in member_shapes:
            member_shape = member_shapes[name]
            if member_shape.serialization.get('eventheader'):
                if name in headers:
                    value = headers[name]
                    if member_shape.type_name == 'timestamp':
                        # Event stream timestamps are an in milleseconds so we
                        # divide by 1000 to convert to seconds.
                        value = self._timestamp_parser(value / 1000.0)
                    final_parsed[name] = value

    def _initial_body_parse(self, body_contents):
        # This method should do the initial xml/json parsing of the
        # body. We we still need to walk the parsed body in order
        # to convert types, but this method will do the first round
        # of parsing.
        raise NotImplementedError("_initial_body_parse")
class EventStreamJSONParser(BaseEventStreamParser, BaseJSONParser):
    """Event stream parser whose event payloads are JSON documents."""

    def _initial_body_parse(self, body_contents):
        return self._parse_body_as_json(body_contents)
class EventStreamXMLParser(BaseEventStreamParser, BaseXMLResponseParser):
    """Event stream parser whose event payloads are XML documents."""

    def _initial_body_parse(self, xml_string):
        # An empty payload parses to an empty XML element.
        if not xml_string:
            return ETree.Element('')
        return self._parse_xml_string_to_dom(xml_string)
class JSONParser(BaseJSONParser):
    """Response parser for the "json" protocol."""
    # NOTE: the docstring above was previously placed *after* the
    # EVENT_STREAM_PARSER_CLS assignment, where it was a stray string
    # expression and never populated JSONParser.__doc__.  A class
    # docstring must be the first statement in the class body.

    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser

    def _do_parse(self, response, shape):
        """Parse a successful "json" protocol response against *shape*."""
        parsed = {}
        if shape is not None:
            event_name = shape.event_stream_name
            if event_name:
                parsed = self._handle_event_stream(response, shape, event_name)
            else:
                parsed = self._handle_json_body(response['body'], shape)
        self._inject_response_metadata(parsed, response['headers'])
        return parsed

    def _do_modeled_error_parse(self, response, shape):
        """Parse a modeled exception directly from the JSON body."""
        return self._handle_json_body(response['body'], shape)

    def _handle_event_stream(self, response, shape, event_name):
        """Attach an EventStream, requiring an initial-response first event."""
        event_stream_shape = shape.members[event_name]
        event_stream = self._create_event_stream(response, event_stream_shape)
        try:
            event = event_stream.get_initial_response()
        except NoInitialResponseError:
            error_msg = 'First event was not of type initial-response'
            raise ResponseParserError(error_msg)
        parsed = self._handle_json_body(event.payload, shape)
        parsed[event_name] = event_stream
        return parsed

    def _handle_json_body(self, raw_body, shape):
        # The json.loads() gives us the primitive JSON types, but we need
        # to traverse the parsed JSON data to convert to richer types
        # (blobs, timestamps, etc.)
        parsed_json = self._parse_body_as_json(raw_body)
        return self._parse_shape(shape, parsed_json)
class BaseRestParser(ResponseParser):
    """Shared parsing for rest-* protocols (HTTP-bound members + body)."""

    def _do_parse(self, response, shape):
        final_parsed = {}
        final_parsed['ResponseMetadata'] = self._populate_response_metadata(
            response)
        self._add_modeled_parse(response, shape, final_parsed)
        return final_parsed

    def _add_modeled_parse(self, response, shape, final_parsed):
        """Parse header/status members then the body payload into final_parsed."""
        if shape is None:
            return final_parsed
        member_shapes = shape.members
        self._parse_non_payload_attrs(response, shape,
                                      member_shapes, final_parsed)
        self._parse_payload(response, shape, member_shapes, final_parsed)

    def _do_modeled_error_parse(self, response, shape):
        final_parsed = {}
        self._add_modeled_parse(response, shape, final_parsed)
        return final_parsed

    def _populate_response_metadata(self, response):
        """Extract RequestId/HostId from response headers."""
        metadata = {}
        headers = response['headers']
        if 'x-amzn-requestid' in headers:
            metadata['RequestId'] = headers['x-amzn-requestid']
        elif 'x-amz-request-id' in headers:
            metadata['RequestId'] = headers['x-amz-request-id']
        # HostId is what it's called whenever this value is returned
        # in an XML response body, so to be consistent, we'll always
        # call it HostId.
        metadata['HostId'] = headers.get('x-amz-id-2', '')
        return metadata

    def _parse_payload(self, response, shape, member_shapes, final_parsed):
        """Parse the body, honoring an explicit 'payload' member if declared."""
        if 'payload' in shape.serialization:
            # If a payload is specified in the output shape, then only that
            # shape is used for the body payload.
            payload_member_name = shape.serialization['payload']
            body_shape = member_shapes[payload_member_name]
            if body_shape.serialization.get('eventstream'):
                body = self._create_event_stream(response, body_shape)
                final_parsed[payload_member_name] = body
            elif body_shape.type_name in ['string', 'blob']:
                # This is a stream
                body = response['body']
                if isinstance(body, bytes):
                    body = body.decode(self.DEFAULT_ENCODING)
                final_parsed[payload_member_name] = body
            else:
                original_parsed = self._initial_body_parse(response['body'])
                final_parsed[payload_member_name] = self._parse_shape(
                    body_shape, original_parsed)
        else:
            # No explicit payload: parse the whole body against the shape.
            original_parsed = self._initial_body_parse(response['body'])
            body_parsed = self._parse_shape(shape, original_parsed)
            final_parsed.update(body_parsed)

    def _parse_non_payload_attrs(self, response, shape,
                                 member_shapes, final_parsed):
        """Fill members bound to the status code or HTTP headers."""
        headers = response['headers']
        for name in member_shapes:
            member_shape = member_shapes[name]
            location = member_shape.serialization.get('location')
            if location is None:
                continue
            elif location == 'statusCode':
                final_parsed[name] = self._parse_shape(
                    member_shape, response['status_code'])
            elif location == 'headers':
                final_parsed[name] = self._parse_header_map(member_shape,
                                                            headers)
            elif location == 'header':
                header_name = member_shape.serialization.get('name', name)
                if header_name in headers:
                    final_parsed[name] = self._parse_shape(
                        member_shape, headers[header_name])

    def _parse_header_map(self, shape, headers):
        """Collect all headers sharing the shape's prefix into a dict."""
        # Note that headers are case insensitive, so we .lower()
        # all header names and header prefixes.
        parsed = {}
        prefix = shape.serialization.get('name', '').lower()
        for header_name in headers:
            if header_name.lower().startswith(prefix):
                # The key name inserted into the parsed hash
                # strips off the prefix.
                name = header_name[len(prefix):]
                parsed[name] = headers[header_name]
        return parsed

    def _initial_body_parse(self, body_contents):
        # This method should do the initial xml/json parsing of the
        # body. We we still need to walk the parsed body in order
        # to convert types, but this method will do the first round
        # of parsing.
        raise NotImplementedError("_initial_body_parse")

    def _handle_string(self, shape, value):
        """Decode base64+JSON for jsonvalue headers; plain strings pass through."""
        parsed = value
        if is_json_value_header(shape):
            decoded = base64.b64decode(value).decode(self.DEFAULT_ENCODING)
            parsed = json.loads(decoded)
        return parsed
class RestJSONParser(BaseRestParser, BaseJSONParser):
    """Response parser for the "rest-json" protocol."""

    EVENT_STREAM_PARSER_CLS = EventStreamJSONParser

    def _initial_body_parse(self, body_contents):
        # First round: raw JSON bytes -> primitive python types.
        return self._parse_body_as_json(body_contents)

    def _do_error_parse(self, response, shape):
        error = super(RestJSONParser, self)._do_error_parse(response, shape)
        self._inject_error_code(error, response)
        return error

    def _inject_error_code(self, error, response):
        """Set error['Error']['Code'] from a header or the JSON body."""
        # The "Code" value can come from either a response
        # header or a value in the JSON body.
        body = self._initial_body_parse(response['body'])
        if 'x-amzn-errortype' in response['headers']:
            code = response['headers']['x-amzn-errortype']
            # Could be:
            # x-amzn-errortype: ValidationException:
            code = code.split(':')[0]
            error['Error']['Code'] = code
        elif 'code' in body or 'Code' in body:
            error['Error']['Code'] = body.get(
                'code', body.get('Code', ''))
class RestXMLParser(BaseRestParser, BaseXMLResponseParser):
    """Response parser for the "rest-xml" protocol."""

    EVENT_STREAM_PARSER_CLS = EventStreamXMLParser

    def _initial_body_parse(self, xml_string):
        # An empty body parses to an empty XML element.
        if not xml_string:
            return ETree.Element('')
        return self._parse_xml_string_to_dom(xml_string)

    def _do_error_parse(self, response, shape):
        # We're trying to be service agnostic here, but S3 does have a slightly
        # different response structure for its errors compared to other
        # rest-xml serivces (route53/cloudfront). We handle this by just
        # trying to parse both forms.
        # First:
        # <ErrorResponse xmlns="...">
        #   <Error>
        #     <Type>Sender</Type>
        #     <Code>InvalidInput</Code>
        #     <Message>Invalid resource type: foo</Message>
        #   </Error>
        #   <RequestId>request-id</RequestId>
        # </ErrorResponse>
        if response['body']:
            # If the body ends up being invalid xml, the xml parser should not
            # blow up. It should at least try to pull information about the
            # the error response from other sources like the HTTP status code.
            try:
                return self._parse_error_from_body(response)
            except ResponseParserError as e:
                LOG.debug(
                    'Exception caught when parsing error response body:',
                    exc_info=True)
        return self._parse_error_from_http_status(response)

    def _parse_error_from_http_status(self, response):
        """Fallback error built from the status code and standard headers."""
        return {
            'Error': {
                'Code': str(response['status_code']),
                'Message': six.moves.http_client.responses.get(
                    response['status_code'], ''),
            },
            'ResponseMetadata': {
                'RequestId': response['headers'].get('x-amz-request-id', ''),
                'HostId': response['headers'].get('x-amz-id-2', ''),
            }
        }

    def _parse_error_from_body(self, response):
        """Parse the XML error body, handling both S3 and ErrorResponse forms."""
        xml_contents = response['body']
        root = self._parse_xml_string_to_dom(xml_contents)
        parsed = self._build_name_to_xml_node(root)
        self._replace_nodes(parsed)
        if root.tag == 'Error':
            # This is an S3 error response. First we'll populate the
            # response metadata.
            metadata = self._populate_response_metadata(response)
            # The RequestId and the HostId are already in the
            # ResponseMetadata, but are also duplicated in the XML
            # body. We don't need these values in both places,
            # we'll just remove them from the parsed XML body.
            parsed.pop('RequestId', '')
            parsed.pop('HostId', '')
            return {'Error': parsed, 'ResponseMetadata': metadata}
        elif 'RequestId' in parsed:
            # Other rest-xml serivces:
            parsed['ResponseMetadata'] = {'RequestId': parsed.pop('RequestId')}
        default = {'Error': {'Message': '', 'Code': ''}}
        merge_dicts(default, parsed)
        return default

    @_text_content
    def _handle_string(self, shape, text):
        # Delegates to BaseRestParser._handle_string (jsonvalue headers);
        # @_text_content first converts an XML Element node to its text.
        text = super(RestXMLParser, self)._handle_string(shape, text)
        return text
# Maps a service model's "protocol" string to the parser class that
# understands that protocol's response format.
PROTOCOL_PARSERS = {
    'ec2': EC2QueryParser,
    'query': QueryParser,
    'json': JSONParser,
    'rest-json': RestJSONParser,
    'rest-xml': RestXMLParser,
}
| |
import operator
import re
from functools import partial
from django.apps import apps
from django.db import connections
from wagtail.search.index import RelatedFields, SearchField
from .query import MATCH_NONE, Phrase, PlainText
# Sentinel so that None remains usable as an explicit initializer.
NOT_SET = object()


def balanced_reduce(operator, seq, initializer=NOT_SET):
    """
    Has the same result as Python's reduce function, but performs the
    calculations in a different order.

    Instead of folding left-to-right -- which nests constructed data
    structures (such as search query clauses) to depth n-1 -- the sequence
    is split in half and each half reduced independently, producing a
    balanced tree of depth ~log2(n).  Code that traverses the result
    therefore doesn't crash with recursion errors.

    For example, reduce(add, [1, 2, 3, 4, 5, 6, 7, 8]) computes
        (1 + (2 + (3 + (4 + (5 + (6 + (7 + 8)))))))
    (depth 7), whereas balanced_reduce computes
        ((1 + 2) + (3 + 4)) + ((5 + 6) + (7 + 8))
    (depth 2).
    """
    # Working on a list keeps slicing and length checks simple.
    items = seq if isinstance(seq, list) else list(seq)
    if initializer is not NOT_SET:
        if not items:
            return initializer
        return operator(initializer, balanced_reduce(operator, items))
    if not items:
        raise TypeError("reduce() of empty sequence with no initial value")
    if len(items) == 1:
        return items[0]
    middle = len(items) // 2
    left = balanced_reduce(operator, items[:middle])
    right = balanced_reduce(operator, items[middle:])
    return operator(left, right)
# Combinators built on balanced_reduce: each collapses an iterable of query
# clauses into one clause while keeping the resulting tree shallow.
# Reduce any iterable to a single value using a logical OR e.g. (a | b | ...)
OR = partial(balanced_reduce, operator.or_)
# Reduce any iterable to a single value using a logical AND e.g. (a & b & ...)
AND = partial(balanced_reduce, operator.and_)
# Reduce any iterable to a single value using an addition
ADD = partial(balanced_reduce, operator.add)
# Reduce any iterable to a single value using a multiplication
MUL = partial(balanced_reduce, operator.mul)
# Query strings longer than this are truncated before normalisation.
MAX_QUERY_STRING_LENGTH = 255


def normalise_query_string(query_string):
    """Truncate, lowercase and collapse whitespace in a raw query string."""
    truncated = query_string[:MAX_QUERY_STRING_LENGTH].lower()
    # Collapse runs of spaces, then drop leading/trailing whitespace.
    return re.sub(" +", " ", truncated).strip()
def separate_filters_from_query(query_string):
    """Split `key:value` filter expressions out of a search query string.

    Returns a (filters, remaining_query) tuple; quoted filter values have
    their surrounding quotes stripped.
    """
    filters_regexp = r'(\w+):(\w+|".+")'
    filters = {
        match.group(1): match.group(2).strip('"')
        for match in re.finditer(filters_regexp, query_string)
    }
    remaining = re.sub(filters_regexp, "", query_string).strip()
    return filters, remaining
def parse_query_string(query_string, operator=None, zero_terms=MATCH_NONE):
    """
    This takes a query string typed in by a user and extracts the following:

     - Quoted terms (for phrase search)
     - Filters

    For example, the following query:
    `hello "this is a phrase" live:true` would be parsed into:

    filters: {'live': 'true'}
    tokens: And([PlainText('hello'), Phrase('this is a phrase')])
    """
    filters, query_string = separate_filters_from_query(query_string)

    tokens = []
    # Splitting on '"' alternates between unquoted (even index) and quoted
    # (odd index) segments.
    for index, segment in enumerate(query_string.split('"')):
        segment = segment.strip()
        if not segment:
            continue
        if index % 2:
            tokens.append(Phrase(segment))
        else:
            tokens.append(
                PlainText(segment, operator=operator or PlainText.DEFAULT_OPERATOR)
            )

    if not tokens:
        return filters, zero_terms

    combine = OR if operator == "or" else AND
    return filters, combine(tokens)
def get_descendant_models(model):
    """
    Returns all descendants of a model, including the model itself.
    """
    descendant_models = set()
    for candidate in apps.get_models():
        if issubclass(candidate, model):
            descendant_models.add(candidate)
    # The model itself always belongs in the result, even if unregistered.
    descendant_models.add(model)
    return descendant_models
def get_content_type_pk(model):
    """Return the primary key of *model*'s ContentType row."""
    # We import it locally because this file is loaded before apps are ready.
    from django.contrib.contenttypes.models import ContentType

    content_type = ContentType.objects.get_for_model(model)
    return content_type.pk
def get_ancestors_content_types_pks(model):
    """
    Returns content types ids for the ancestors of this model, excluding it.
    """
    from django.contrib.contenttypes.models import ContentType

    parents = model._meta.get_parent_list()
    content_types = ContentType.objects.get_for_models(*parents)
    return [content_type.pk for content_type in content_types.values()]
def get_descendants_content_types_pks(model):
    """
    Returns content types ids for the descendants of this model, including it.
    """
    from django.contrib.contenttypes.models import ContentType

    descendants = get_descendant_models(model)
    content_types = ContentType.objects.get_for_models(*descendants)
    return [content_type.pk for content_type in content_types.values()]
def get_search_fields(search_fields):
    """Yield every SearchField, flattening nested RelatedFields recursively."""
    for search_field in search_fields:
        if isinstance(search_field, SearchField):
            yield search_field
        elif isinstance(search_field, RelatedFields):
            yield from get_search_fields(search_field.fields)
def get_postgresql_connections():
    """Return every configured database connection backed by PostgreSQL."""
    postgres_connections = []
    for connection in connections.all():
        if connection.vendor == "postgresql":
            postgres_connections.append(connection)
    return postgres_connections
| |
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""googledatastore connection test suite."""
__author__ = 'proppy@google.com (Johan Euphrosine)'
import os
import threading
import unittest
import mox
import googledatastore as datastore
from googledatastore import helper
class TestResponse(object):
  """Minimal stand-in for an HTTP response: just status and reason."""

  def __init__(self, status, reason):
    self.reason = reason
    self.status = status
class FakeCredentialsFromEnv(object):
  """Credential double (env flavor) whose authorize() is a no-op."""

  def authorize(self, http):
    """Pretend to wire auth into *http*; intentionally does nothing."""
    return None
class FakeCredentials(object):
  """Credential double whose authorize() is a no-op."""

  def authorize(self, http):
    """Pretend to wire auth into *http*; intentionally does nothing."""
    return None
class DatastoreTest(unittest.TestCase):
  def setUp(self):
    """Create a fresh mox instance and a connection to a fixed test endpoint."""
    self.mox = mox.Mox()
    self.conn = datastore.Datastore(
        project_endpoint='https://example.com/datastore/v1beta3/projects/foo')
  def tearDown(self):
    """Undo stubs and reset mox so mocks don't leak between tests."""
    self.mox.UnsetStubs()
    self.mox.ResetAll()
  def makeLookupRequest(self):
    """Build a LookupRequest for a single key Greeting0/'foo0'."""
    request = datastore.LookupRequest()
    key = request.keys.add()
    path = key.path.add()
    path.kind = 'Greeting0'
    path.name = 'foo0'
    return request
  def makeLookupResponse(self):
    """Build a LookupResponse containing the entity Greeting0/'foo0'."""
    response = datastore.LookupResponse()
    entity_result = response.found.add()
    path = entity_result.entity.key.path.add()
    path.kind = 'Greeting0'
    path.name = 'foo0'
    return response
def makeExpectedHeaders(self, payload):
return {
'Content-Type': 'application/x-protobuf',
'Content-Length': str(len(payload)),
'X-Goog-Api-Format-Version': '2',
}
  def expectRequest(self, *args, **kwargs):
    """Stub the connection's http layer and record one expected request."""
    self.mox.StubOutWithMock(self.conn._http, 'request')
    # In record mode this registers the expectation and returns the mock call,
    # so the caller can chain .AndReturn(...) on it.
    return self.conn._http.request(*args, **kwargs)
  def testProjectIdRequired(self):
    """Datastore() must reject a missing project endpoint/id."""
    self.assertRaises(TypeError, datastore.Datastore, None)
    self.assertRaises(TypeError, datastore.Datastore, None, port=8080)
  def testLookupSuccess(self):
    """lookup() POSTs the serialized request and decodes a 200 response."""
    request = self.makeLookupRequest()
    payload = request.SerializeToString()
    response = self.makeLookupResponse()
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:lookup',
        method='POST', body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=200, reason='Found'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    resp = self.conn.lookup(request)
    self.assertEqual(response, resp)
    self.mox.VerifyAll()
  def testLookupFailure(self):
    """A 400 with a serialized Status body raises RPCError with its details."""
    request = self.makeLookupRequest()
    payload = request.SerializeToString()
    response = datastore.Status()
    response.code = 3  # Code.INVALID_ARGUMENT
    response.message = 'An error message.'
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:lookup',
        method='POST',
        body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=400, reason='IGNORED'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    with self.assertRaisesRegexp(
        datastore.RPCError,
        'datastore call lookup failed: '
        'Error code: INVALID_ARGUMENT. Message: An error message.'):
      self.conn.lookup(request)
    self.mox.VerifyAll()
def testLookupFailureWithNonStatus(self):
    """A 4xx reply whose body is not a Status proto falls back to reporting
    the raw HTTP status code and body text."""
    request = self.makeLookupRequest()
    payload = request.SerializeToString()
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:lookup',
        method='POST',
        body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=400, reason='IGNORED'),
            'There was an error.'))
    self.mox.ReplayAll()
    with self.assertRaisesRegexp(
            datastore.RPCError,
            'datastore call lookup failed: '
            'HTTP status code: 400. Message: There was an error'):
        self.conn.lookup(request)
    self.mox.VerifyAll()
def testRunQuery(self):
    """run_query round-trips a RunQueryRequest over the :runQuery endpoint."""
    request = datastore.RunQueryRequest()
    request.query.kind.add().name = 'Foo'
    payload = request.SerializeToString()
    response = datastore.RunQueryResponse()
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:runQuery',
        method='POST', body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=200, reason='Found'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    resp = self.conn.run_query(request)
    self.assertEqual(response, resp)
    self.mox.VerifyAll()
def testBeginTransaction(self):
    """begin_transaction round-trips over the :beginTransaction endpoint."""
    request = datastore.BeginTransactionRequest()
    payload = request.SerializeToString()
    response = datastore.BeginTransactionResponse()
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:beginTransaction',
        method='POST', body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=200, reason='Found'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    resp = self.conn.begin_transaction(request)
    self.assertEqual(response, resp)
    self.mox.VerifyAll()
def testCommit(self):
    """commit round-trips a CommitRequest over the :commit endpoint."""
    request = datastore.CommitRequest()
    request.transaction = 'transaction-id'
    payload = request.SerializeToString()
    response = datastore.CommitResponse()
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:commit',
        method='POST', body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=200, reason='Found'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    resp = self.conn.commit(request)
    self.assertEqual(response, resp)
    self.mox.VerifyAll()
def testRollback(self):
    """rollback round-trips a RollbackRequest over the :rollback endpoint."""
    request = datastore.RollbackRequest()
    request.transaction = 'transaction-id'
    payload = request.SerializeToString()
    response = datastore.RollbackResponse()
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:rollback',
        method='POST', body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=200, reason='Found'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    resp = self.conn.rollback(request)
    self.assertEqual(response, resp)
    self.mox.VerifyAll()
def testAllocateIds(self):
    """allocate_ids round-trips over the :allocateIds endpoint."""
    request = datastore.AllocateIdsRequest()
    payload = request.SerializeToString()
    response = datastore.AllocateIdsResponse()
    self.expectRequest(
        'https://example.com/datastore/v1beta3/projects/foo:allocateIds',
        method='POST', body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=200, reason='Found'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    resp = self.conn.allocate_ids(request)
    self.assertEqual(response, resp)
    self.mox.VerifyAll()
def testDefaultBaseUrl(self):
    """Without an explicit endpoint, requests target datastore.googleapis.com."""
    self.conn = datastore.Datastore(project_id='foo')
    request = self.makeLookupRequest()
    payload = request.SerializeToString()
    response = self.makeLookupResponse()
    self.expectRequest(
        'https://datastore.googleapis.com/v1beta3/projects/foo:lookup',
        method='POST', body=payload,
        headers=self.makeExpectedHeaders(payload)).AndReturn((
            TestResponse(status=200, reason='Found'),
            response.SerializeToString()))
    self.mox.ReplayAll()
    resp = self.conn.lookup(request)
    self.assertEqual(response, resp)
    self.mox.VerifyAll()
def testSetOptions(self):
    """set_options() is global, but default connections are per-thread.

    Each thread lazily builds its own default connection; changing the
    global options invalidates every thread's cached connection so the
    next call builds a fresh one with the new settings.
    """
    other_thread_conn = []
    # lock1 signals "first two connections grabbed"; lock2 gates the third grab.
    lock1 = threading.Lock()
    lock2 = threading.Lock()
    lock1.acquire()
    lock2.acquire()
    def target():
        # Grab two connections
        other_thread_conn.append(datastore.get_default_connection())
        other_thread_conn.append(datastore.get_default_connection())
        lock1.release()  # Notify that we have grabbed the first 2 connections.
        lock2.acquire()  # Wait for the signal to grab the 3rd.
        other_thread_conn.append(datastore.get_default_connection())
    other_thread = threading.Thread(target=target)
    # Resetting options and state.
    datastore._options = {}
    datastore.set_options(project_id='foo')
    self.mox.StubOutWithMock(helper, 'get_credentials_from_env')
    self.mox.StubOutWithMock(helper, 'get_project_endpoint_from_env')
    endpoint = 'http://localhost:8080/datastore/v1beta3/projects/%s'
    # Two lookups per project id: one per thread that builds a connection.
    helper.get_project_endpoint_from_env(project_id='foo').AndReturn(
        endpoint % 'foo')
    helper.get_project_endpoint_from_env(project_id='foo').AndReturn(
        endpoint % 'foo')
    helper.get_project_endpoint_from_env(project_id='bar').AndReturn(
        endpoint % 'bar')
    helper.get_project_endpoint_from_env(project_id='bar').AndReturn(
        endpoint % 'bar')
    helper.get_credentials_from_env().AndReturn(FakeCredentialsFromEnv())
    self.mox.ReplayAll()
    # Start the thread and wait for the first lock.
    other_thread.start()
    lock1.acquire()
    t1_conn1 = datastore.get_default_connection()
    t2_conn1, t2_conn1b = other_thread_conn
    other_thread_conn = []
    # The two threads get different connections.
    self.assertIsNot(t1_conn1, t2_conn1)
    # Multiple calls on the same thread get the same connection.
    self.assertIs(t1_conn1, datastore.get_default_connection())
    self.assertIs(t2_conn1, t2_conn1b)
    # Change the global options and grab the connections again.
    datastore.set_options(project_id='bar')
    lock2.release()
    other_thread.join()
    t1_conn2 = datastore.get_default_connection()
    t2_conn2 = other_thread_conn[0]
    # Changing the options causes all threads to create new connections.
    self.assertIsNot(t1_conn1, t1_conn2)
    self.assertIsNot(t2_conn1, t2_conn2)
    # The new connections are still different for each thread.
    self.assertIsNot(t1_conn2, t2_conn2)
    # The old connections has the old settings.
    self.assertEqual('http://localhost:8080/datastore/v1beta3/projects/foo',
                     t1_conn1._url)
    self.assertEqual('http://localhost:8080/datastore/v1beta3/projects/foo',
                     t2_conn1._url)
    # The new connections has the new settings.
    self.assertEqual('http://localhost:8080/datastore/v1beta3/projects/bar',
                     t1_conn2._url)
    self.assertEqual('http://localhost:8080/datastore/v1beta3/projects/bar',
                     t2_conn2._url)
    self.assertEqual(FakeCredentialsFromEnv, type(t1_conn2._credentials))
    self.assertEqual(FakeCredentialsFromEnv, type(t2_conn2._credentials))
    self.mox.VerifyAll()
def testFunctions(self):
    """Module-level helpers (datastore.lookup, datastore.run_query, ...)
    delegate to the matching method on the default connection."""
    datastore.set_options(
        credentials=FakeCredentialsFromEnv(),
        project_endpoint='http://localhost:8080/datastore/v1beta3/projects/foo')
    # snake_case RPC name -> CamelCase message-class prefix.
    def caml(s): return ''.join(p[0].upper()+p[1:] for p in s.split('_'))
    rpcs = ['lookup', 'run_query', 'begin_transaction',
            'commit', 'rollback', 'allocate_ids']
    methods = [(r, getattr(datastore, caml(r)+'Request'),
                getattr(datastore, caml(r)+'Response'))
               for r in rpcs]
    conn = datastore.get_default_connection()
    # Record: each connection method is called once with its request type.
    for m, req_class, resp_class in methods:
        self.mox.StubOutWithMock(conn, m)
        method = getattr(conn, m)
        method(mox.IsA(req_class)).AndReturn(resp_class())
    self.mox.ReplayAll()
    for m, req_class, resp_class in methods:
        method = getattr(datastore, m)
        result = method(req_class())
        self.assertEqual(resp_class, type(result))
    self.mox.VerifyAll()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
"""
Provides an APIView class that is the base of all views in REST framework.
"""
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status, exceptions
from rest_framework.compat import HttpResponseBase, View, set_rollback
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils import formatting
import inspect
import warnings
def get_view_name(view_cls, suffix=None):
    """
    Given a view class, return a textual name to represent the view.
    This name is used in the browsable API, and in OPTIONS responses.

    This function is the default for the `VIEW_NAME_FUNCTION` setting.
    """
    # Strip the conventional class-name endings, then split CamelCase words.
    raw = view_cls.__name__
    for trailing in ('View', 'ViewSet'):
        raw = formatting.remove_trailing_string(raw, trailing)
    display = formatting.camelcase_to_spaces(raw)
    return display + ' ' + suffix if suffix else display
def get_view_description(view_cls, html=False):
    """
    Given a view class, return a textual description to represent the view.
    This name is used in the browsable API, and in OPTIONS responses.

    This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting.
    """
    # The class docstring is the canonical description; dedent it for display.
    doc = view_cls.__doc__ or ''
    plain = formatting.dedent(smart_text(doc))
    if html:
        return formatting.markup_description(plain)
    return plain
def exception_handler(exc, context):
    """
    Returns the response that should be used for any given exception.

    By default we handle the REST framework `APIException`, and also
    Django's built-in `Http404` and `PermissionDenied` exceptions.

    Any unhandled exceptions may return `None`, which will cause a 500 error
    to be raised.
    """
    if isinstance(exc, exceptions.APIException):
        headers = {}
        auth_header = getattr(exc, 'auth_header', None)
        if auth_header:
            headers['WWW-Authenticate'] = auth_header
        wait = getattr(exc, 'wait', None)
        if wait:
            headers['Retry-After'] = '%d' % wait
        if isinstance(exc.detail, (list, dict)):
            data = exc.detail
        else:
            data = {'detail': exc.detail}
        set_rollback()
        return Response(data, status=exc.status_code, headers=headers)
    if isinstance(exc, Http404):
        set_rollback()
        return Response({'detail': six.text_type(_('Not found.'))},
                        status=status.HTTP_404_NOT_FOUND)
    if isinstance(exc, PermissionDenied):
        set_rollback()
        return Response({'detail': six.text_type(_('Permission denied.'))},
                        status=status.HTTP_403_FORBIDDEN)
    # Note: Unhandled exceptions will raise a 500 error.
    return None
class APIView(View):
    # Base class for all views in REST framework.  (No class docstring on
    # purpose: get_view_description() surfaces `__doc__` in the browsable
    # API / OPTIONS, so adding one would change user-visible output.)

    # The following policies may be set at either globally, or per-view.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
    parser_classes = api_settings.DEFAULT_PARSER_CLASSES
    authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
    throttle_classes = api_settings.DEFAULT_THROTTLE_CLASSES
    permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES
    content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS
    metadata_class = api_settings.DEFAULT_METADATA_CLASS
    versioning_class = api_settings.DEFAULT_VERSIONING_CLASS

    # Allow dependency injection of other settings to make testing easier.
    settings = api_settings

    @classmethod
    def as_view(cls, **initkwargs):
        """
        Store the original class on the view function.

        This allows us to discover information about the view when we do URL
        reverse lookups.  Used for breadcrumb generation.
        """
        view = super(APIView, cls).as_view(**initkwargs)
        view.cls = cls
        # Note: session based authentication is explicitly CSRF validated,
        # all other authentication is CSRF exempt.
        return csrf_exempt(view)

    @property
    def allowed_methods(self):
        """
        Wrap Django's private `_allowed_methods` interface in a public property.
        """
        return self._allowed_methods()

    @property
    def default_response_headers(self):
        headers = {
            'Allow': ', '.join(self.allowed_methods),
        }
        if len(self.renderer_classes) > 1:
            headers['Vary'] = 'Accept'
        return headers

    def http_method_not_allowed(self, request, *args, **kwargs):
        """
        If `request.method` does not correspond to a handler method,
        determine what kind of exception to raise.
        """
        raise exceptions.MethodNotAllowed(request.method)

    def permission_denied(self, request):
        """
        If request is not permitted, determine what kind of exception to raise.
        """
        if not request.successful_authenticator:
            raise exceptions.NotAuthenticated()
        raise exceptions.PermissionDenied()

    def throttled(self, request, wait):
        """
        If request is throttled, determine what kind of exception to raise.
        """
        raise exceptions.Throttled(wait)

    def get_authenticate_header(self, request):
        """
        If a request is unauthenticated, determine the WWW-Authenticate
        header to use for 401 responses, if any.
        """
        authenticators = self.get_authenticators()
        if authenticators:
            return authenticators[0].authenticate_header(request)

    def get_parser_context(self, http_request):
        """
        Returns a dict that is passed through to Parser.parse(),
        as the `parser_context` keyword argument.
        """
        # Note: Additionally `request` and `encoding` will also be added
        #       to the context by the Request object.
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {})
        }

    def get_renderer_context(self):
        """
        Returns a dict that is passed through to Renderer.render(),
        as the `renderer_context` keyword argument.
        """
        # Note: Additionally 'response' will also be added to the context,
        #       by the Response object.
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {}),
            'request': getattr(self, 'request', None)
        }

    def get_exception_handler_context(self):
        """
        Returns a dict that is passed through to EXCEPTION_HANDLER,
        as the `context` argument.
        """
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {}),
            'request': getattr(self, 'request', None)
        }

    def get_view_name(self):
        """
        Return the view name, as used in OPTIONS responses and in the
        browsable API.
        """
        func = self.settings.VIEW_NAME_FUNCTION
        return func(self.__class__, getattr(self, 'suffix', None))

    def get_view_description(self, html=False):
        """
        Return some descriptive text for the view, as used in OPTIONS responses
        and in the browsable API.
        """
        func = self.settings.VIEW_DESCRIPTION_FUNCTION
        return func(self.__class__, html)

    # API policy instantiation methods

    def get_format_suffix(self, **kwargs):
        """
        Determine if the request includes a '.json' style format suffix
        """
        if self.settings.FORMAT_SUFFIX_KWARG:
            return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG)

    def get_renderers(self):
        """
        Instantiates and returns the list of renderers that this view can use.
        """
        return [renderer() for renderer in self.renderer_classes]

    def get_parsers(self):
        """
        Instantiates and returns the list of parsers that this view can use.
        """
        return [parser() for parser in self.parser_classes]

    def get_authenticators(self):
        """
        Instantiates and returns the list of authenticators that this view can use.
        """
        return [auth() for auth in self.authentication_classes]

    def get_permissions(self):
        """
        Instantiates and returns the list of permissions that this view requires.
        """
        return [permission() for permission in self.permission_classes]

    def get_throttles(self):
        """
        Instantiates and returns the list of throttles that this view uses.
        """
        return [throttle() for throttle in self.throttle_classes]

    def get_content_negotiator(self):
        """
        Instantiate and return the content negotiation class to use.
        """
        if not getattr(self, '_negotiator', None):
            self._negotiator = self.content_negotiation_class()
        return self._negotiator

    # API policy implementation methods

    def perform_content_negotiation(self, request, force=False):
        """
        Determine which renderer and media type to use render the response.
        """
        renderers = self.get_renderers()
        conneg = self.get_content_negotiator()
        try:
            return conneg.select_renderer(request, renderers, self.format_kwarg)
        except Exception:
            if force:
                return (renderers[0], renderers[0].media_type)
            raise

    def perform_authentication(self, request):
        """
        Perform authentication on the incoming request.

        Note that if you override this and simply 'pass', then authentication
        will instead be performed lazily, the first time either
        `request.user` or `request.auth` is accessed.
        """
        request.user

    def check_permissions(self, request):
        """
        Check if the request should be permitted.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            if not permission.has_permission(request, self):
                self.permission_denied(request)

    def check_object_permissions(self, request, obj):
        """
        Check if the request should be permitted for a given object.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            if not permission.has_object_permission(request, self, obj):
                self.permission_denied(request)

    def check_throttles(self, request):
        """
        Check if request should be throttled.
        Raises an appropriate exception if the request is throttled.
        """
        for throttle in self.get_throttles():
            if not throttle.allow_request(request, self):
                self.throttled(request, throttle.wait())

    def determine_version(self, request, *args, **kwargs):
        """
        If versioning is being used, then determine any API version for the
        incoming request. Returns a two-tuple of (version, versioning_scheme)
        """
        if self.versioning_class is None:
            return (None, None)
        scheme = self.versioning_class()
        return (scheme.determine_version(request, *args, **kwargs), scheme)

    # Dispatch methods

    def initialize_request(self, request, *args, **kwargs):
        """
        Returns the initial request object.
        """
        parser_context = self.get_parser_context(request)

        return Request(
            request,
            parsers=self.get_parsers(),
            authenticators=self.get_authenticators(),
            negotiator=self.get_content_negotiator(),
            parser_context=parser_context
        )

    def initial(self, request, *args, **kwargs):
        """
        Runs anything that needs to occur prior to calling the method handler.
        """
        self.format_kwarg = self.get_format_suffix(**kwargs)

        # Ensure that the incoming request is permitted
        self.perform_authentication(request)
        self.check_permissions(request)
        self.check_throttles(request)

        # Perform content negotiation and store the accepted info on the request
        neg = self.perform_content_negotiation(request)
        request.accepted_renderer, request.accepted_media_type = neg

        # Determine the API version, if versioning is in use.
        version, scheme = self.determine_version(request, *args, **kwargs)
        request.version, request.versioning_scheme = version, scheme

    def finalize_response(self, request, response, *args, **kwargs):
        """
        Returns the final response object.
        """
        # Make the error obvious if a proper response is not returned
        assert isinstance(response, HttpResponseBase), (
            'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` '
            'to be returned from the view, but received a `%s`'
            % type(response)
        )

        if isinstance(response, Response):
            if not getattr(request, 'accepted_renderer', None):
                neg = self.perform_content_negotiation(request, force=True)
                request.accepted_renderer, request.accepted_media_type = neg

            response.accepted_renderer = request.accepted_renderer
            response.accepted_media_type = request.accepted_media_type
            response.renderer_context = self.get_renderer_context()

        for key, value in self.headers.items():
            response[key] = value

        return response

    def handle_exception(self, exc):
        """
        Handle any exception that occurs, by returning an appropriate response,
        or re-raising the error.
        """
        if isinstance(exc, (exceptions.NotAuthenticated,
                            exceptions.AuthenticationFailed)):
            # WWW-Authenticate header for 401 responses, else coerce to 403
            auth_header = self.get_authenticate_header(self.request)

            if auth_header:
                exc.auth_header = auth_header
            else:
                exc.status_code = status.HTTP_403_FORBIDDEN

        exception_handler = self.settings.EXCEPTION_HANDLER

        # Support the deprecated single-argument handler signature.
        if len(inspect.getargspec(exception_handler).args) == 1:
            warnings.warn(
                # FIX: closing backtick was missing from the message.
                'The `exception_handler(exc)` call signature is deprecated. '
                'Use `exception_handler(exc, context)` instead.',
                DeprecationWarning
            )
            response = exception_handler(exc)
        else:
            context = self.get_exception_handler_context()
            response = exception_handler(exc, context)

        if response is None:
            raise

        response.exception = True
        return response

    # Note: Views are made CSRF exempt from within `as_view` as to prevent
    # accidental removal of this exemption in cases where `dispatch` needs to
    # be overridden.
    def dispatch(self, request, *args, **kwargs):
        """
        `.dispatch()` is pretty much the same as Django's regular dispatch,
        but with extra hooks for startup, finalize, and exception handling.
        """
        self.args = args
        self.kwargs = kwargs
        request = self.initialize_request(request, *args, **kwargs)
        self.request = request
        self.headers = self.default_response_headers  # deprecate?

        try:
            self.initial(request, *args, **kwargs)

            # Get the appropriate handler method
            if request.method.lower() in self.http_method_names:
                handler = getattr(self, request.method.lower(),
                                  self.http_method_not_allowed)
            else:
                handler = self.http_method_not_allowed

            response = handler(request, *args, **kwargs)

        except Exception as exc:
            response = self.handle_exception(exc)

        self.response = self.finalize_response(request, response, *args, **kwargs)
        return self.response

    def options(self, request, *args, **kwargs):
        """
        Handler method for HTTP 'OPTIONS' request.
        """
        if self.metadata_class is None:
            return self.http_method_not_allowed(request, *args, **kwargs)
        data = self.metadata_class().determine_metadata(request, self)
        return Response(data, status=status.HTTP_200_OK)
| |
#!/usr/bin/env python
from multiprocessing import Pool
import numpy as np
from gtapps_mp.utils import pyfits
import tempfile
import os
import subprocess
import sys
from gt_apps import filter,expCube
def ltcube(times):
'''This is the atomic function that actually runs in the seperate
threads. It takes a list as input where the first element is
tmin, second is tmax, third is spacecraft file, fourth is the
event file and fifth is the zmax parameter. It first uses
gtselect with wide open cuts to divide up the event file then it
runs gtltcube on that event file. The temporary event file is
deleted automatically. The function returns the name of the
created ltcube file which can be combined with other files and/or
deleted later.'''
print "Starting calculation on interval {} to {}".format(times[0],times[1])
if times[3] != '':
evfile = tempfile.NamedTemporaryFile(suffix=".fits")
filter['ra']="INDEF"
filter['dec']="INDEF"
filter['rad'] = "INDEF"
filter['evclass'] = 0
filter['evtype'] = "INDEF"
filter['infile'] = times[3]
filter['outfile'] = evfile.name
filter['ra'] = "INDEF"
filter['dec'] = "INDEF"
filter['tmin'] = times[0]
filter['tmax'] = times[1]
filter['emin'] = 0.0
filter['emax'] = 1000000.0
filter['zmin'] = 0
filter['zmax'] = 180
filter['convtype'] = -1
filter['chatter'] = 0
filter.run(print_command=True)
osfilehandle,outfilename = tempfile.mkstemp(suffix=".fits")
if times[3] != '':
expCube['evfile'] = evfile.name
else:
expCube['evfile'] = ""
expCube['scfile'] = times[2]
expCube['outfile'] = outfilename
expCube['tmin'] = times[0]
expCube['tmax'] = times[1]
expCube['dcostheta'] = 0.025
expCube['binsz'] = 1
expCube['phibins'] = 0
expCube['zmax'] = times[4]
expCube['chatter'] = 0
expCube.run(print_command=True)
print "Completed calculation on interval {} to {}".format(times[0],times[1])
return outfilename
def ltsum(filenames, Outfile, SaveTemp):
    '''This function takes a list of livetime cubes and sums them up using
    gtltsum. It first checks to see if there's only one temporary
    file. If so, it just copies that to the output file. If not, it
    creates a temporary file that lists the individual ltcube files
    and operates gtltsum on them.'''
    if len(filenames) <= 1:
        # Only one cube: nothing to sum, just copy it into place.
        subprocess.call(["cp", filenames[0], Outfile])
    else:
        # gtltsum accepts an @-prefixed list file naming the cubes to sum.
        fileListfile = tempfile.NamedTemporaryFile()
        for filename in filenames:
            fileListfile.file.write(filename + "\n")
        fileListfile.flush()
        subprocess.call(["gtltsum",
                         "infile1=@"+fileListfile.name,
                         "outfile="+Outfile])
    if SaveTemp:
        print "Did not delete the following temporary files:"
        print filenames
    else:
        print "Deleting temporary files..."
        for filename in filenames:
            os.remove(filename)
def gtltcube_mp(bins, SCFile, EVFile, OutFile, SaveTemp, zmax, tmin, tmax):
    '''This functions looks at a spacecraft file and splits the time into
    chunks that match the bin edges in the spacecraft file. It then
    submits jobs based upon those start and stop times. This is to
    make the resulting files as close to the original as possible.
    Note that this assumes you are using the full time period in your
    spacecraft file.'''
    verbose = False
    if EVFile != "":
        evfile = pyfits.open(EVFile, mode='readonly')
        # GTI extension (assumed to be HDU 2 of the event file -- TODO confirm).
        gti_data = evfile[2].data
    # NOTE(review): tmin == 0 requires an event file, since the start/stop
    # times are then read from its header -- verify callers honor this.
    if tmin == 0:
        print "Determining start and stop times from the event file..."
        tstart = evfile[0].header['TSTART']
        tstop = evfile[0].header['TSTOP']
    else:
        print "Using user defined tmin and tmax..."
        tstart = tmin
        tstop = tmax
    print "Opening SC file to determine break points..."
    hdulist = pyfits.open(SCFile, mode='readonly')
    scdata = hdulist[1].data
    hdulist.close()
    scstart = scdata.field('START')
    scstop = scdata.field('STOP')
    # Keep only spacecraft bins fully inside the requested window.
    time_filter = (tstart <= scstart) & (scstop <= tstop)
    redo = True
    if EVFile != "":
        print "Checking for good times in the event file..."
    # Repeatedly split the time range; if a slice contains no GTI, reduce
    # the number of slices by one and try again.
    while redo:
        redo = False
        scstartssplit = np.array_split(scstart[time_filter], int(bins))
        scstopssplit = np.array_split(scstop[time_filter], bins)
        # Explicitly set the first and last point to the values in the evfile header
        scstartssplit[0][0] = tstart
        scstopssplit[-1][-1] = tstop
        starts = [st[0] for st in scstartssplit]
        stops = [st[-1] for st in scstopssplit]
        if EVFile != "":
            for interval in zip(starts, stops):
                if verbose: print "Looking at interval", interval[0], "to", interval[1]
                good_times = False
                # grrrr. some bug in pyfits doesn't let me do this the python way...
                for gti_i in range(len(gti_data)):
                    if (not good_times):
                        if verbose: print "  Checking gti", gti_data[gti_i]['START'], "to", gti_data[gti_i]['STOP']
                        gti_starts = interval[0] <= gti_data[gti_i]['START'] <= interval[1]
                        gti_stops = interval[0] <= gti_data[gti_i]['STOP'] <= interval[1]
                        if verbose: print "   Does this gti start inside this interval? ", gti_starts
                        if verbose: print "   Does this gti stop inside this interval? ", gti_stops
                        good_times = gti_starts or gti_stops
                if verbose: print
                if verbose: print " Are there good times inside this interval? ", good_times
                if not good_times:
                    redo = True
                if verbose: print
        if redo:
            if bins <= 1:
                print "No good time intervals found. Bailing..."
                sys.exit(1)
            print "One (or more) of the slices doesn't have a GTI."
            print "Reducing the number of threads from ", bins, "to", bins-1
            bins -= 1
    # One job per slice: (tmin, tmax, scfile, evfile, zmax) tuples.
    scfiles = [SCFile for st in scstartssplit]
    evfiles = [EVFile for st in scstartssplit]
    print "EVFiles:", evfiles
    zmaxes = [zmax for st in scstartssplit]
    pool = Pool(processes=bins)
    times = np.array([starts, stops, scfiles, evfiles, zmaxes])
    print "Spawning {} jobs...".format(bins)
    tempfilenames = pool.map(ltcube, times.transpose())
    print "Combining temporary files..."
    ltsum(tempfilenames, OutFile, SaveTemp)
def cli():
    '''Parse command-line arguments and launch the multiprocess gtltcube run.'''
    helpString = "Submits the gtltcube program as sperate threads via python and\
joins up the resulting temporary exposure cubes at the end\
resulting in a single exposure cube for the input event file.\
This greatly reduces the running time. For more details on \
gtltcube see the gtltcube help file."
    import argparse
    parser = argparse.ArgumentParser(description=helpString)
    parser.add_argument("jobs", type=int, help="The number of jobs you wish to spawn (usually the number of cores on your machine).")
    parser.add_argument("sfile", help="The spacecraft data file. See gtltcube help for more information.")
    parser.add_argument("evfile", help="Input event file. See gtltcube help for more information.")
    parser.add_argument("outfile", help="Output file name.")
    # NOTE(review): this option is parsed as a string, so any non-empty value
    # (even "False") is truthy downstream -- confirm intended usage.
    parser.add_argument("--savetmp", default = False, help="Save the temporary files (default is False).")
    parser.add_argument("--zmax", type=int, default = 180, help="zmax parameter for gtltcube (default is 180)")
    parser.add_argument("--tmin", type=float, default=0, help="start time (if not given, will derive from evfile)")
    parser.add_argument("--tmax", type=float, default=0, help="stop time (if not given, will derive from evfile)")
    args = parser.parse_args()
    gtltcube_mp(args.jobs, args.sfile, args.evfile, args.outfile, args.savetmp, args.zmax, args.tmin, args.tmax)
if __name__ == '__main__': cli()
| |
import traceback, sys
from unittest import TestResult
import datetime
from tcmessages import TeamcityServiceMessages
PYTHON_VERSION_MAJOR = sys.version_info[0]
def strclass(cls):
    """Return the dotted ``module.ClassName`` for *cls* (just the module if the class name is empty)."""
    if cls.__name__:
        return "%s.%s" % (cls.__module__, cls.__name__)
    return cls.__module__
def smart_str(s):
    """Coerce *s* to a native string, UTF-8 encoding unicode on Python 2.

    Non-string objects go through ``str()``; exceptions whose args cannot
    be stringified are rendered element by element.  On Python 3 strings
    are returned unchanged.
    """
    encoding = 'utf-8'
    errors = 'strict'
    # sys.version_info is used directly (instead of the module constant)
    # so the function is self-contained; the value is identical.
    if sys.version_info[0] < 3:
        is_string = isinstance(s, basestring)
    else:
        is_string = isinstance(s, str)
    if not is_string:
        try:
            return str(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([smart_str(arg) for arg in s])
            return unicode(s).encode(encoding, errors)
    elif sys.version_info[0] < 3 and isinstance(s, unicode):
        # FIX: the version guard prevents a NameError on Python 3, where
        # `unicode` does not exist -- previously any `str` input crashed here.
        return s.encode(encoding, errors)
    else:
        return s
class TeamcityTestResult(TestResult):
def __init__(self, stream=sys.stdout, *args, **kwargs):
    """Create a result writer that emits TeamCity service messages to *stream*.

    Extra keyword arguments are attached to the instance as attributes.
    """
    TestResult.__init__(self)
    # Expose arbitrary kwargs as instance attributes (configuration toggles).
    for arg, value in kwargs.items():
        setattr(self, arg, value)
    self.output = stream
    self.messages = TeamcityServiceMessages(self.output, prepend_linebreak=True)
    self.messages.testMatrixEntered()
    # Per-test / per-suite bookkeeping.
    self.current_failed = False
    self.current_suite = None
    self.subtest_suite = None
def find_first(self, val):
    """Return the leading quoted literal of *val*, honoring backslash-escaped quotes."""
    quote = val[0]
    scan_from = 1
    hit = val[scan_from:].find(quote)
    # Skip over quotes that are escaped with a backslash.
    while hit != -1 and val[scan_from + hit - 1] == "\\":
        scan_from += hit + 1
        hit = val[scan_from:].find(quote)
    return val[:hit + scan_from + 1]
def find_second(self, val):
    """Return the trailing quoted literal of *val* (right-hand side of a `!=` when present)."""
    marker = val.find("!=")
    if marker != -1:
        # Take the text after "!=" and extract the quoted literal it starts with.
        tail = val[marker + 2:].strip()
        quote = tail[0]
        scan_from = 1
        hit = tail[scan_from:].find(quote)
        while hit != -1 and tail[scan_from + hit - 1] == "\\":
            scan_from += hit + 1
            hit = tail[scan_from:].find(quote)
        return tail[:hit + scan_from + 1]
    else:
        # No comparison operator: scan backwards from the closing quote.
        quote = val[-1]
        hit = val[:len(val) - 1].rfind(quote)
        while hit != -1 and val[hit - 1] == "\\":
            hit = val[:hit - 1].rfind(quote)
        return val[hit:]
def formatErr(self, err):
    """Render an ``exc_info`` triple as the full formatted traceback text."""
    exc_type, exc_value, exc_tb = err
    lines = traceback.format_exception(exc_type, exc_value, exc_tb)
    return ''.join(lines)
def getTestName(self, test, is_subtest=False):
    """Return a display name for *test*.

    Subtests are rendered as "<parent name> <sub description>"; unittest
    tests use their method name (or full str() for bare ``runTest``);
    anything else falls back to str(), truncated at the first space.
    """
    if is_subtest:
        base_name = self.getTestName(test.test_case)
        return "{} {}".format(base_name, test._subDescription())
    if hasattr(test, '_testMethodName'):
        if test._testMethodName == "runTest":
            return str(test)
        return test._testMethodName
    else:
        test_name = str(test)
        # FIX: str.index() raises ValueError when no space is present, but
        # the result was compared against -1 (find() semantics).  Use find()
        # so space-free names no longer crash.
        whitespace_index = test_name.find(" ")
        if whitespace_index != -1:
            test_name = test_name[:whitespace_index]
        return test_name
def getTestId(self, test):
    """Return the test's ``id`` attribute.

    NOTE(review): this returns the attribute itself (for unittest tests,
    the bound ``id`` method), not the result of calling it -- confirm
    callers expect that.
    """
    return test.id
def addSuccess(self, test):
    """Record a passing test; the TeamCity 'finished' message is emitted from stopTest()."""
    TestResult.addSuccess(self, test)
def addError(self, test, err):
    """Report an errored test (unexpected exception) to TeamCity."""
    location = self.init_suite(test)
    self.current_failed = True
    TestResult.addError(self, test, err)
    # Convert the exc_info triple into formatted traceback text.
    err = self._exc_info_to_string(err, test)
    self.messages.testStarted(self.getTestName(test), location=location)
    self.messages.testError(self.getTestName(test),
                            message='Error', details=err, duration=self.__getDuration(test))
def find_error_value(self, err):
    """Extract the asserted expression text from the last frame of traceback *err*."""
    frames = traceback.extract_tb(err)
    last_source_line = frames[-1][-1]
    return last_source_line.split('assert')[-1].strip()
def addFailure(self, test, err):
    """Report a failed assertion to TeamCity.

    When both sides of the failed comparison are quoted strings, they are
    extracted and unescaped so the IDE can render an expected/actual diff.
    """
    location = self.init_suite(test)
    self.current_failed = True
    TestResult.addFailure(self, test, err)
    error_value = smart_str(err[1])
    if not len(error_value):
        # means it's test function and we have to extract value from traceback
        error_value = self.find_error_value(err[2])
    self_find_first = self.find_first(error_value)
    self_find_second = self.find_second(error_value)
    quotes = ["'", '"']
    # Only offer a diff when both extracted values are fully quoted literals.
    if (self_find_first[0] == self_find_first[-1] and self_find_first[0] in quotes and
            self_find_second[0] == self_find_second[-1] and self_find_second[0] in quotes):
        # let's unescape strings to show sexy multiline diff in PyCharm.
        # By default all caret return chars are escaped by testing framework
        first = self._unescape(self_find_first)
        second = self._unescape(self_find_second)
    else:
        first = second = ""
    err = self._exc_info_to_string(err, test)
    self.messages.testStarted(self.getTestName(test), location=location)
    duration = self.__getDuration(test)
    self.messages.testFailed(self.getTestName(test),
                             message='Failure', details=err, expected=first, actual=second, duration=duration)
    def addSkip(self, test, reason):
        """Report a skipped test as ignored to teamcity, with the skip reason."""
        self.init_suite(test)
        self.current_failed = True
        self.messages.testIgnored(self.getTestName(test), message=reason)
    def _getSuite(self, test):
        """Work out (suite name, test location, suite location) for `test`.

        Tests carrying an explicit `suite` attribute use its recorded
        locations (with a line number from the test or its wrapped test);
        otherwise locations are built from the source file of the test's
        class using the python_uttestid:// scheme.
        """
        if hasattr(test, "suite"):
            suite = strclass(test.suite)
            suite_location = test.suite.location
            location = test.suite.abs_location
            if hasattr(test, "lineno"):
                location = location + ":" + str(test.lineno)
            else:
                location = location + ":" + str(test.test.lineno)
        else:
            import inspect
            try:
                source_file = inspect.getsourcefile(test.__class__)
                if source_file:
                    source_dir_splitted = source_file.split("/")[:-1]
                    source_dir = "/".join(source_dir_splitted) + "/"
                else:
                    source_dir = ""
            except TypeError:
                # getsourcefile raises TypeError for built-in classes.
                source_dir = ""
            suite = strclass(test.__class__)
            suite_location = "python_uttestid://" + source_dir + suite
            location = "python_uttestid://" + source_dir + str(test.id())
        return (suite, location, suite_location)
def startTest(self, test):
self.current_failed = False
setattr(test, "startTime", datetime.datetime.now())
    def init_suite(self, test):
        """Ensure a teamcity suite is open for `test`'s suite, closing the
        previous suite if the suite changed. Returns the test's location."""
        suite, location, suite_location = self._getSuite(test)
        if suite != self.current_suite:
            if self.current_suite:
                self.messages.testSuiteFinished(self.current_suite)
            self.current_suite = suite
            self.messages.testSuiteStarted(self.current_suite, location=suite_location)
        return location
    def stopTest(self, test):
        """Finish a test: emit started/finished for tests that completed
        without any failure/error/skip message, or close an open subtest
        suite left behind by addSubTest."""
        duration = self.__getDuration(test)
        if not self.subtest_suite:
            if not self.current_failed:
                location = self.init_suite(test)
                self.messages.testStarted(self.getTestName(test), location=location)
                self.messages.testFinished(self.getTestName(test), duration=int(duration))
        else:
            self.messages.testSuiteFinished(self.subtest_suite)
            self.subtest_suite = None
def __getDuration(self, test):
start = getattr(test, "startTime", datetime.datetime.now())
d = datetime.datetime.now() - start
duration = d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
return duration
    def addSubTest(self, test, subtest, err):
        """Report a subTest result, grouping subtests of one test method into
        their own teamcity suite. err is None on success, else exc_info."""
        suite_name = self.getTestName(test) # + " (subTests)"
        if not self.subtest_suite:
            self.subtest_suite = suite_name
            self.messages.testSuiteStarted(self.subtest_suite)
        else:
            # Switch suites when a different test method starts subtests.
            if suite_name != self.subtest_suite:
                self.messages.testSuiteFinished(self.subtest_suite)
                self.subtest_suite = suite_name
                self.messages.testSuiteStarted(self.subtest_suite)
        name = self.getTestName(subtest, True)
        if err is not None:
            error = self._exc_info_to_string(err, test)
            self.messages.testStarted(name)
            self.messages.testFailed(name, message='Failure', details=error, duration=None)
        else:
            self.messages.testStarted(name)
            self.messages.testFinished(name)
def endLastSuite(self):
if self.current_suite:
self.messages.testSuiteFinished(self.current_suite)
self.current_suite = None
def _unescape(self, text):
# do not use text.decode('string_escape'), it leads to problems with different string encodings given
return text.replace("\\n", "\n")
class TeamcityTestRunner(object):
    """Test runner that reports progress through TeamcityTestResult."""

    def __init__(self, stream=sys.stdout):
        self.stream = stream

    def _makeResult(self, **kwargs):
        """Build the result object (presumably a subclass hook)."""
        return TeamcityTestResult(self.stream, **kwargs)

    def run(self, test, **kwargs):
        """Run `test`, announcing the total test count first and closing the
        last open suite afterwards. Returns the result object."""
        outcome = self._makeResult(**kwargs)
        outcome.messages.testCount(test.countTestCases())
        test(outcome)
        outcome.endLastSuite()
        return outcome
| |
#!/usr/bin/python2.7
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'kpy@google.com (Ka-Ping Yee) and many other Googlers'
from django_setup import ugettext as _ # always keep this first
import calendar
import cgi
import copy
from datetime import datetime, timedelta
import hmac
import httplib
import logging
import os
import random
import re
import string
import sys
import time
import traceback
import unicodedata
import urllib
import urlparse
import base64
from django.core.validators import EmailValidator, URLValidator, ValidationError
import django.utils.html
from django.template.defaulttags import register
from google.appengine.api import images
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import webapp
import google.appengine.ext.webapp.template
import google.appengine.ext.webapp.util
from recaptcha.client import captcha
from babel.dates import format_date
from babel.dates import format_datetime
from babel.dates import format_time
import babel
import const
import config
import model
import pfif
import resources
# The domain name from which to send e-mail.
EMAIL_DOMAIN = 'appspotmail.com' # All apps on appspot.com use this for mail.
# Query parameters which are automatically preserved on page transition
# if you use utils.BaseHandler.get_url() or
# env.hidden_input_tags_for_preserved_query_params.
PRESERVED_QUERY_PARAM_NAMES = ['ui', 'charsets', 'referrer']
@register.filter
def get_value(dictionary, key):
    """Django template filter: return dictionary.get(key) (None if absent)."""
    return dictionary.get(key)
# ==== Field value text ========================================================
def get_person_sex_text(person):
    """Returns the UI text for a person's sex field ('' key when unset)."""
    return const.PERSON_SEX_TEXT.get(person.sex or '')
def get_note_status_text(note):
    """Returns the UI text for a note's status field ('' key when unset)."""
    return const.NOTE_STATUS_TEXT.get(note.status or '')
def get_person_status_text(person):
    """Returns the UI text for a person's latest_status ('' key when unset)."""
    return const.PERSON_STATUS_TEXT.get(person.latest_status or '')
# Things that occur as prefixes of global paths (i.e. no repository name):
# '/global' or '/personfinder', optionally followed by a sub-path.
GLOBAL_PATH_RE = re.compile(r'^/(global|personfinder)(/?|/.*)$')
# ==== String formatting =======================================================
def format_boolean(value):
    """Render a truthy value as 'true' and a falsy one as 'false'."""
    return 'true' if value else 'false'
def format_utc_datetime(dt):
    """Format a (UTC-naive) datetime as ISO-8601 with a 'Z' suffix at
    second precision; '' when dt is falsy."""
    if dt:
        return '%sZ' % dt.replace(microsecond=0).isoformat()
    return ''
def format_utc_timestamp(timestamp):
    """Format epoch seconds as an ISO-8601 'Z'-suffixed UTC string.

    Returns '' for anything that is not an int or float. (Inlines the
    format_utc_datetime logic; a datetime is always truthy.)
    """
    if not isinstance(timestamp, (int, float)):
        return ''
    dt = datetime.utcfromtimestamp(timestamp).replace(microsecond=0)
    return dt.isoformat() + 'Z'
def format_sitemaps_datetime(dt):
    """Format dt for sitemaps: second precision, explicit '+00:00' offset."""
    truncated = datetime(dt.year, dt.month, dt.day,
                         dt.hour, dt.minute, dt.second)
    return '%s+00:00' % truncated.isoformat()
def encode(string, encoding='utf-8'):
    """If unicode, encode to encoding; if 8-bit string, leave unchanged.

    Python 2 only: `unicode` is the py2 text type. Non-unicode values
    (byte strings, None, numbers) pass through untouched.
    """
    if isinstance(string, unicode):
        string = string.encode(encoding)
    return string
def urlencode(params, encoding='utf-8'):
    """Encode the key-value pairs in 'params' into a query string, applying
    the specified encoding to any Unicode strings and ignoring any keys that
    have value == None. (urllib.urlencode doesn't support Unicode).

    Python 2: `basestring` covers str and unicode, so any non-string value
    (not only None) is dropped from the result.
    """
    keys = params.keys()
    keys.sort() # Sort the keys to get canonical ordering
    return urllib.urlencode([
        (encode(key, encoding), encode(params[key], encoding))
        for key in keys if isinstance(params[key], basestring)])
def set_param(params, param, value):
    """Take the params from a urlparse and override one of the values.

    `params` is a raw query string; `value` None removes the param.
    Returns the re-encoded query string.
    """
    # This will strip out None-valued params and collapse repeated params.
    params = dict(cgi.parse_qsl(params))
    if value is None:
        if param in params:
            del(params[param])
    else:
        params[param] = value
    return urlencode(params)
def set_url_param(url, param, value):
    """This modifies a URL setting the given param to the specified value. This
    may add the param or override an existing value, or, if the value is None,
    it will remove the param. Note that value must be a basestring and can't be
    an int, for example."""
    url_parts = list(urlparse.urlparse(url))
    # Index 4 of the urlparse 6-tuple is the query string.
    url_parts[4] = set_param(url_parts[4], param, value)
    return urlparse.urlunparse(url_parts)
def anchor_start(href):
    """Returns the HREF escaped and embedded in an opening anchor tag."""
    return '<a href="%s">' % django.utils.html.escape(href)
def anchor(href, body):
    """Returns a string anchor HTML element with the given href and body,
    both HTML-escaped."""
    return anchor_start(href) + django.utils.html.escape(body) + '</a>'
# ==== Validators ==============================================================
# These validator functions are used to check and parse query parameters.
# Each validator should return a parsed, sanitized value, or return a default
# value, or raise ValueError to display an error message to the user.
def strip(string):
    """Strip surrounding whitespace, then any trailing NUL characters.

    Trailing nulls appear in some strange character encodings like Shift-JIS.
    """
    trimmed = string.strip()
    return trimmed.rstrip('\0')
def strip_and_lower(string):
    """Lowercased strip(): trims whitespace and trailing NULs, then lowers."""
    return string.strip().rstrip('\0').lower()
def validate_yes(string):
return (strip(string).lower() == 'yes') and 'yes' or ''
def validate_checkbox(string):
    """Normalize an HTML checkbox value: 'on' (any case) -> 'yes', else ''."""
    return 'yes' if string.strip().rstrip('\0').lower() == 'on' else ''
def validate_checkbox_as_bool(val):
    """True for 'on'/'yes'/'true' (case-insensitive, no stripping), else False."""
    return val.lower() in ('on', 'yes', 'true')
def validate_role(string):
    """'provide' when the parameter says so (any case), otherwise 'seek'."""
    if string.strip().rstrip('\0').lower() == 'provide':
        return 'provide'
    return 'seek'
def validate_int(string):
    """Parse a stripped integer; falsy input is returned unchanged."""
    if not string:
        return string
    return int(string.strip().rstrip('\0'))
def validate_sex(string):
    """Validates the 'sex' parameter, returning a canonical value or ''."""
    if string:
        string = strip(string).lower()
    # Empty input and values outside the PFIF set both collapse to ''.
    return string in pfif.PERSON_SEX_VALUES and string or ''
def validate_expiry(value):
    """Validates the 'expiry_option' parameter as a positive integer.

    Returns:
        the int() value if it parses and is positive, otherwise None,
        which represents the 'unspecified' status (callers fall back to
        the repository default in that case).
    """
    try:
        value = int(value)
    except (TypeError, ValueError):
        # Non-numeric input (or None) means 'unspecified'.
        # (Original used py2-only `except Exception, e` with an unused
        # binding and swallowed every exception type.)
        return None
    return value if value > 0 else None
APPROXIMATE_DATE_RE = re.compile(r'^\d{4}(-\d\d)?(-\d\d)?$')
def validate_approximate_date(string):
    """Accept YYYY, YYYY-MM or YYYY-MM-DD (returned as text); else ''."""
    if not string:
        return ''
    candidate = string.strip().rstrip('\0')
    if APPROXIMATE_DATE_RE.match(candidate):
        return candidate
    return ''
AGE_RE = re.compile(r'^\d+(-\d+)?$')
# Hyphen with possibly surrounding whitespaces.
HYPHEN_RE = re.compile(
    ur'\s*[-\u2010-\u2015\u2212\u301c\u30fc\ufe58\ufe63\uff0d]\s*',
    re.UNICODE)
def validate_age(string):
    """Validates the 'age' parameter, returning a canonical value or ''.

    Accepts a single age or an age range; typographic/full-width hyphens
    are normalized to ASCII '-' via NFKC plus HYPHEN_RE. Python 2 only
    (uses the `unicode` builtin and a ur'' literal).
    """
    if string:
        string = strip(string)
        string = unicodedata.normalize('NFKC', unicode(string))
        string = HYPHEN_RE.sub('-', string)
        if AGE_RE.match(string):
            return string
    return ''
def validate_status(string):
    """Validates an incoming status parameter, returning one of the canonical
    status strings or ''. Note that '' is always used as the Python value
    to represent the 'unspecified' status."""
    if string:
        string = strip(string).lower()
    # Anything outside pfif.NOTE_STATUS_VALUES collapses to ''.
    return string in pfif.NOTE_STATUS_VALUES and string or ''
DATETIME_RE = re.compile(r'^(2\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)Z$')
def validate_datetime(string):
    """Parse a UTC timestamp of the form YYYY-MM-DDTHH:MM:SSZ.

    Returns None for empty input; raises ValueError on malformed input.
    (The pattern only admits years 2000-2999.)
    """
    if not string:
        return None # A missing value is okay.
    match = DATETIME_RE.match(string)
    if not match:
        raise ValueError('Bad datetime: %r' % string)
    return datetime(*[int(part) for part in match.groups()])
def validate_timestamp(string):
    """Parse epoch seconds into a UTC datetime.

    Falsy input is returned unchanged; unparseable input raises
    ValueError. (The original used a bare `except:`, which also caught
    KeyboardInterrupt/SystemExit.)
    """
    if not string:
        return string
    try:
        return datetime.utcfromtimestamp(float(string.strip().rstrip('\0')))
    except Exception:
        raise ValueError('Bad timestamp: %s' % string)
def validate_image(bytestring):
    """Wrap raw bytes in an App Engine Image.

    Returns None for empty input, the Image for valid data, and False for
    data the images API rejects. Accessing image.width forces the data to
    be parsed so bad images are caught here.
    """
    try:
        image = None
        if bytestring:
            image = images.Image(bytestring)
            image.width
        return image
    except:
        # NOTE(review): bare except — any error at all yields False.
        return False
EMAIL_VALIDATOR = EmailValidator()
def validate_email(email):
    """Validates an email address, returning True on correct,
    False on incorrect, None on empty string."""
    # Note that google.appengine.api.mail.is_email_valid() is unhelpful;
    # it checks only for the empty string
    if not email:
        return None
    try:
        EMAIL_VALIDATOR(email)
        return True
    except ValidationError:
        return False
def validate_version(string):
    """Version, if present, should be in pfif versions.

    Returns the pfif.PFIF_VERSIONS entry for the given version string, or
    the entry for PFIF_DEFAULT_VERSION when the parameter is empty/None.
    Raises ValueError for an unknown version.
    """
    # strip() is only safe on a real string; the original called
    # strip(string) unconditionally on the last line and crashed on None.
    version = strip(string) if string else ''
    if version and version not in pfif.PFIF_VERSIONS:
        raise ValueError('Bad pfif version: %s' % string)
    return pfif.PFIF_VERSIONS[version or pfif.PFIF_DEFAULT_VERSION]
REPO_RE = re.compile('^[a-z0-9-]+$')
def validate_repo(string):
    """Validate a repository name (lowercase letters, digits, hyphens).

    Returns None for empty input; raises ValueError for the reserved name
    'global' or any otherwise-illegal name.
    """
    candidate = (string or '').strip()
    if not candidate:
        return None
    if candidate == 'global':
        raise ValueError('"global" is an illegal repository name.')
    if not REPO_RE.match(candidate):
        raise ValueError('Repository names can only contain '
                         'lowercase letters, digits, and hyphens.')
    return candidate
RESOURCE_NAME_RE = re.compile('^[a-z0-9._-]+$')
def validate_resource_name(string):
    """A resource name or bundle label: [a-z0-9._-]+, lowercased.

    Returns None for empty input; raises ValueError otherwise.
    """
    candidate = (string or '').strip().lower()
    if not candidate:
        return None
    if RESOURCE_NAME_RE.match(candidate):
        return candidate
    raise ValueError('Invalid resource name or bundle name: %r' % candidate)
LANG_RE = re.compile('^[A-Za-z0-9-]+$')
def validate_lang(string):
    """A BCP 47 language tag, normalized to lowercase; None when empty."""
    tag = (string or '').strip().lower()
    if not tag:
        return None
    if LANG_RE.match(tag):
        return tag
    raise ValueError('Invalid language tag: %r' % tag)
def validate_cache_seconds(string):
    """A number of seconds to cache a Resource in RAM (default 1.0)."""
    text = (string or '').strip()
    return float(text) if text else 1.0
# ==== Other utilities =========================================================
def get_app_name():
    """Canonical name of the app, without HR s~ nonsense. This only works in
    the context of the appserver (eg remote_api can't use it)."""
    # Imported lazily: app_identity is only available inside the appserver.
    from google.appengine.api import app_identity
    return app_identity.get_application_id()
def sanitize_urls(record):
    """Clean up URLs to protect against XSS.

    We check URLs submitted through Person Finder, but bad data might come in
    through the API. Invalid single-line URL fields are nulled out; invalid
    lines in multi-line URL fields are dropped (and logged). Mutates
    `record` in place.
    """
    url_validator = URLValidator(schemes=['http', 'https'])
    # Single-line URLs.
    for field in ['photo_url', 'source_url']:
        url = getattr(record, field, None)
        if not url:
            continue
        try:
            url_validator(url)
        except ValidationError:
            setattr(record, field, None)
    # Multi-line URLs.
    for field in ['profile_urls']:
        urls = (getattr(record, field, None) or '').splitlines()
        sanitized_urls = []
        for url in urls:
            if url:
                try:
                    url_validator(url)
                    sanitized_urls.append(url)
                except ValidationError:
                    logging.warning(
                        'Unsanitary URL in database on %s' % record.record_id)
        # Only write back when something was actually removed.
        if len(urls) != len(sanitized_urls):
            setattr(record, field, '\n'.join(sanitized_urls))
def get_host(host=None):
    """Return the host name, without version specific details.

    Defaults to the HTTP_HOST environment variable. Keeps at most the last
    three dot-separated labels, stripping e.g. a version prefix.

    (The original placed the docstring after the first statement, where it
    was just a dead string expression.)
    """
    host = host or os.environ['HTTP_HOST']
    parts = host.split('.')
    if len(parts) > 3:
        return '.'.join(parts[-3:])
    else:
        return host
# List of sensitive field names in person and note records; these are
# blanked out by filter_sensitive_fields() for clients without full read
# permission.
SENSITIVE_FIELDS = [
    'date_of_birth',
    'author_email',
    'author_phone',
    'email_of_found_person',
    'phone_of_found_person',
]
def optionally_filter_sensitive_fields(records, auth=None):
    """Removes sensitive fields from a list of dictionaries, unless the client
    has full read authorization.

    Args:
        records (list of dict): person records, note records, or joined
            person+note records; modified in place.
        auth: authorization object; only full_read_permission is consulted.
    """
    has_full_read = bool(auth and auth.full_read_permission)
    if not has_full_read:
        filter_sensitive_fields(records)
def filter_sensitive_fields(records):
    """Blank out sensitive fields, in place, in a list of record dicts.

    Handles plain person/note records as well as joined records whose keys
    carry a 'person_' or 'note_' prefix.

    Args:
        records (list of dict): person, note, or joined person+note records.
    """
    prefixes = ('', 'person_', 'note_')
    for record in records:
        for field in SENSITIVE_FIELDS:
            for prefix in prefixes:
                key = prefix + field
                if key in record:
                    record[key] = ''
def join_person_and_note_record(person_record, note_record):
    """Join a person record and a note record into a single dictionary.

    The field names are prefixed with 'person_' or 'note_' to avoid name
    collision. note_record can be None. In that case, person record field
    names are still prefixed.

    Args:
        person_record (dict): A dictionary representation of a person record.
            Cannot be None.
        note_record (dict): A dictionary representation of a note record for
            the person. Can be None; must refer to the same person otherwise.
    """
    joined_record = {}
    for name, value in person_record.iteritems():
        new_name = get_field_name_for_joined_record(name, 'person')
        joined_record[new_name] = value
    if note_record:
        # The note must belong to the person being joined.
        assert (note_record['person_record_id'] ==
                person_record['person_record_id'])
        for name, value in note_record.iteritems():
            new_name = get_field_name_for_joined_record(name, 'note')
            joined_record[new_name] = value
    return joined_record
def get_field_name_for_joined_record(original_field_name, record_type):
    """Converts a field name in a person/note record into a field name used in
    a joined record of a person and a note.

    The two record-id fields keep their names unchanged; everything else
    gets a '<record_type>_' prefix. See also join_person_and_note_record().

    Args:
        original_field_name (str): A field name in a person/note record.
        record_type (str): 'person' or 'note'.
    """
    passthrough = ('person_record_id', 'note_record_id')
    if original_field_name in passthrough:
        return original_field_name
    return '%s_%s' % (record_type, original_field_name)
# The current time for testing as a datetime object, or None if using real time.
_utcnow_for_test = None
def set_utcnow_for_test(now):
    """Sets the current time for testing purposes. Pass in a datetime object
    or a timestamp in epoch seconds; or pass None to revert to real time."""
    global _utcnow_for_test
    if isinstance(now, (int, float)):
        now = datetime.utcfromtimestamp(float(now))
    _utcnow_for_test = now
def get_utcnow():
    """Returns the current UTC datetime (settable with set_utcnow_for_test)."""
    if _utcnow_for_test is None:
        return datetime.utcnow()
    return _utcnow_for_test
def get_timestamp(dt):
    """Converts a datetime object to a float value in epoch seconds."""
    epoch_seconds = calendar.timegm(dt.utctimetuple())
    return epoch_seconds + dt.microsecond * 1e-6
def get_utcnow_timestamp():
    """Returns the current time in epoch seconds (settable with
    set_utcnow_for_test)."""
    # Composes the two test-friendly helpers above.
    return get_timestamp(get_utcnow())
def log_api_action(handler, action, num_person_records=0, num_note_records=0,
                   people_skipped=0, notes_skipped=0):
    """Log an API action to ApiActionLog, when the repository's
    api_action_logging config flag is on."""
    if handler.config and handler.config.api_action_logging:
        model.ApiActionLog.record_action(
            handler.repo, handler.params.key,
            handler.params.version.version, action,
            num_person_records, num_note_records,
            people_skipped, notes_skipped,
            handler.request.headers.get('User-Agent'),
            handler.request.remote_addr, handler.request.url)
def get_full_name(given_name, family_name, config):
    """Return full name string obtained by concatenating given_name and
    family_name in the order specified by config.family_name_first, or just
    given_name if config.use_family_name is False.

    No separator is inserted when either part is empty.
    """
    if not config.use_family_name:
        return given_name
    separator = u' ' if (given_name and family_name) else u''
    if config.family_name_first:
        ordered = [family_name, given_name]
    else:
        ordered = [given_name, family_name]
    return separator.join(ordered)
def send_confirmation_email_to_record_author(
        handler, person, action, confirm_url, record_id):
    """Send the author an email to confirm enabling/disabling notes
    of a record.

    `action` must be 'enable' or 'disable'; anything else raises ValueError.
    Responds with a 400 error when the record has no author e-mail address.
    """
    if not person.author_email:
        return handler.error(
            400, _('No author email for record %(id)s.') % {'id' : record_id})
    # i18n: Subject line of an e-mail message confirming the author
    # wants to disable notes for this record
    if action == 'enable':
        subject = _('[Person Finder] Enable notes on "%(full_name)s"?'
                    ) % {'full_name': person.primary_full_name}
    elif action == 'disable':
        subject = _('[Person Finder] Disable notes on "%(full_name)s"?'
                    ) % {'full_name': person.primary_full_name}
    else:
        raise ValueError('Unknown action: %s' % action)
    # send e-mail to record author confirming the lock of this record.
    template_name = '%s_notes_email.txt' % action
    handler.send_mail(
        subject=subject,
        to=person.author_email,
        body=handler.render_to_string(
            template_name,
            author_name=person.author_name,
            full_name=person.primary_full_name,
            site_url=handler.get_url('/'),
            confirm_url=confirm_url
        )
    )
def get_repo_url(request, repo, scheme=None):
    """Constructs the absolute root URL for a given repository.

    Preserves a '/personfinder' path prefix when the current request has
    one; forces http on the dev app server where HTTPS is unavailable.
    """
    req_scheme, req_netloc, req_path, _, _ = urlparse.urlsplit(request.url)
    prefix = req_path.startswith('/personfinder') and '/personfinder' or ''
    if is_dev_app_server():
        scheme = 'http' # HTTPS is not available when using dev_appserver
    return (scheme or req_scheme) + '://' + req_netloc + prefix + '/' + repo
def get_url(request, repo, action, charset='utf-8', scheme=None, **params):
    """Constructs the absolute URL for a given action and query parameters,
    preserving the current repo and the parameters listed in
    PRESERVED_QUERY_PARAM_NAMES."""
    repo_url = get_repo_url(request, repo or 'global', scheme)
    # Explicitly-passed params win; otherwise fall back to the incoming
    # request's value for each preserved parameter.
    for name in PRESERVED_QUERY_PARAM_NAMES:
        params[name] = params.get(name, request.get(name, None))
    query = urlencode(params, charset)
    return repo_url + '/' + action.lstrip('/') + (query and '?' + query or '')
def add_profile_icon_url(website, handler):
    """Return a deep copy of the website dict with an absolute 'icon_url'
    (built from the app's global URL and the icon filename) added."""
    augmented = copy.deepcopy(website) # avoid modifying the original
    augmented['icon_url'] = '%s/%s' % (
        handler.env.global_url, website['icon_filename'])
    return augmented
def strip_url_scheme(url):
    """Return url with its scheme removed (e.g. 'https://h/p' -> '//h/p').

    Falsy input is returned unchanged.
    """
    if not url:
        return url
    _, netloc, path, query, segment = urlparse.urlsplit(url)
    return urlparse.urlunsplit(('', netloc, path, query, segment))
def is_dev_app_server():
    """True when running under dev_appserver (app id carries a 'dev~' prefix)."""
    app_id = os.environ['APPLICATION_ID']
    return app_id.startswith('dev~')
# ==== Struct ==================================================================
class Struct:
    """A simple bag of attributes, populated from keyword arguments."""

    def __init__(self, **attributes):
        self.__dict__.update(attributes)

    def get(self, name, default=None):
        """Like dict.get, over the attributes set on this instance."""
        return self.__dict__.get(name, default)
# ==== Key management ======================================================
def generate_random_key(length):
    """Generates a random URL-safe key of the given length, using a
    cryptographically strong source (SystemRandom)."""
    alphabet = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '1234567890'
                '-_')
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
# ==== Decorators ============================================================
def require_api_key_management_permission(handler_method):
    """
    This is a decorator for API Key management feature. The limitation
    is that the decorator can not preserve payloads within a POST/PUT
    request.

    Access is granted to App Engine admins and to users whose e-mail is
    listed in config.key_management_operators; everyone else is redirected
    to the login page.

    Usage:
        class SomeHandler(utils.BaseHandler):
            @utils.require_api_key_management_permission
            def get(self):
                # ....
                # ....
    """
    def inner(*args, **kwargs):
        # args[0] is the handler instance (the method's `self`).
        handler = args[0]
        user = users.get_current_user()
        if (users.is_current_user_admin() or
            (user and handler.config.key_management_operators and
             user.email() in handler.config.key_management_operators)):
            return handler_method(*args, **kwargs)
        else:
            return handler.redirect(
                users.create_login_url(handler.request.url))
    return inner
# ==== Base Handler ============================================================
class BaseHandler(webapp.RequestHandler):
    """Base class for Person Finder request handlers.

    Subclasses toggle the class-level flags below and rely on auto_params
    to validate and convert incoming query parameters (each entry maps a
    parameter name to its validator/converter function).
    """
    # Handlers that don't need a repository name can set this to False.
    repo_required = True
    # Handlers that require HTTPS can set this to True.
    https_required = False
    # Set this to True to enable a handler even for deactivated repositories.
    ignore_deactivation = False
    # Handlers that require an admin permission must set this to True.
    admin_required = False
    # List all accepted query parameters here with their associated validators.
    auto_params = {
        'action': strip,
        'add_note': validate_yes,
        'age': validate_age,
        'alternate_family_names': strip,
        'alternate_given_names': strip,
        'author_email': strip,
        'author_made_contact': validate_yes,
        'author_name': strip,
        'author_phone': strip,
        'your_own_email': strip,
        'your_own_phone': strip,
        'believed_dead_permission': validate_checkbox_as_bool,
        'cache_seconds': validate_cache_seconds,
        'clone': validate_yes,
        'confirm': validate_yes,
        'contact_email': strip,
        'contact_name': strip,
        'content_id': strip,
        'context': strip,
        'cursor': strip,
        'date_of_birth': validate_approximate_date,
        'description': strip,
        'domain_write_permission': strip,
        'dupe_notes': validate_yes,
        'email_of_found_person': strip,
        'error': strip,
        'expiry_option': validate_expiry,
        'family_name': strip,
        'full_read_permission': validate_checkbox_as_bool,
        'given_name': strip,
        'home_city': strip,
        'home_country': strip,
        'home_neighborhood': strip,
        'home_postal_code': strip,
        'home_state': strip,
        'id': strip,
        'id1': strip,
        'id2': strip,
        'id3': strip,
        'is_valid': validate_checkbox_as_bool,
        'key': strip,
        'lang': validate_lang,
        'last_known_location': strip,
        'mark_notes_reviewed': validate_checkbox_as_bool,
        'max_results': validate_int,
        'min_entry_date': validate_datetime,
        'new_repo': validate_repo,
        'note_photo': validate_image,
        'note_photo_url': strip,
        'omit_notes': validate_yes,
        'operation': strip,
        'organization_name': strip,
        'person_record_id': strip,
        'phone_of_found_person': strip,
        'photo': validate_image,
        'photo_url': strip,
        'profile_url1': strip,
        'profile_url2': strip,
        'profile_url3': strip,
        'query': strip,
        'query_name': strip,
        'query_location': strip,
        'query_type': strip,
        'read_permission': validate_checkbox_as_bool,
        'referrer': strip,
        'resource_bundle': validate_resource_name,
        'resource_bundle_default': validate_resource_name,
        'resource_bundle_original': validate_resource_name,
        'resource_lang': validate_lang,
        'resource_name': validate_resource_name,
        'role': validate_role,
        'search_engine_id': validate_int,
        'search_permission': validate_checkbox_as_bool,
        'sex': validate_sex,
        'signature': strip,
        'skip': validate_int,
        'small': validate_yes,
        'source': strip,
        'source_date': strip,
        'source_name': strip,
        'source_url': strip,
        'stats_permission': validate_checkbox_as_bool,
        'status': validate_status,
        'style': strip,
        'subscribe': validate_checkbox,
        'subscribe_own_info': validate_checkbox,
        'subscribe_email': strip,
        'subscribe_permission': validate_checkbox_as_bool,
        'suppress_redirect': validate_yes,
        'target': strip,
        'text': strip,
        'thumb': validate_checkbox_as_bool,
        'timestamp': validate_timestamp,
        'ui': strip_and_lower,
        'utcnow': validate_timestamp,
        'version': validate_version,
        'own_info': validate_yes,
        'xsrf_token': strip,
    }
    def redirect(self, path, repo=None, permanent=False, **params):
        """Redirect, prepending the current repo to relative paths.

        Absolute URLs ('scheme:...') and global paths ('/global/...',
        '/personfinder/...') are used as-is with params appended as a query
        string; everything else goes through get_url().
        """
        # This will prepend the repo to the path to create a working URL,
        # unless the path has a global prefix or is an absolute URL.
        if re.match('^[a-z]+:', path) or GLOBAL_PATH_RE.match(path):
            if params:
                path += '?' + urlencode(params, self.charset)
        else:
            path = self.get_url(path, repo, **params)
        return webapp.RequestHandler.redirect(self, path, permanent=permanent)
    def render(self, name, language_override=None, cache_seconds=0,
               get_vars=lambda: {}, **vars):
        """Renders a template to the output stream, passing in the variables
        specified in **vars as well as any additional variables returned by
        get_vars(). Since this is intended for use by a dynamic page handler,
        caching is off by default; if cache_seconds is positive, then
        get_vars() will be called only when cached content is unavailable."""
        self.write(self.render_to_string(
            name, language_override, cache_seconds, get_vars, **vars))
    def render_to_string(self, name, language_override=None, cache_seconds=0,
                         get_vars=lambda: {}, **vars):
        """Renders a template to a string, passing in the variables specified
        in **vars as well as any additional variables returned by get_vars().
        Since this is intended for use by a dynamic page handler, caching is
        off by default; if cache_seconds is positive, then get_vars() will be
        called only when cached content is unavailable."""
        # TODO(kpy): Make the contents of extra_key overridable by callers?
        lang = language_override or self.env.lang
        # Cache key component: same template can render differently per
        # repo, charset and query string.
        extra_key = (self.env.repo, self.env.charset, self.request.query_string)
        def get_all_vars():
            # Deferred so that expensive get_vars() only runs on cache miss.
            vars.update(get_vars())
            for key in ('env', 'config', 'params'):
                if key in vars:
                    raise Exception(
                        'Cannot use "%s" as a key in vars. It is reserved.'
                        % key)
            vars['env'] = self.env # pass along application-wide context
            vars['config'] = self.config # pass along the configuration
            vars['params'] = self.params # pass along the query parameters
            return vars
        return resources.get_rendered(
            name, lang, extra_key, get_all_vars, cache_seconds)
    def error(self, code, message='', message_html=''):
        """Renders an error page: info() with the 'error' style."""
        self.info(code, message, message_html, style='error')
    def info(self, code, message='', message_html='', style='info'):
        """Renders a simple page with a message and terminates the response.

        Args:
            code: HTTP status code.
            message: A message in plain text.
            message_html: A message in HTML.
            style: 'info', 'error' or 'plain'. 'info' and 'error' differs in
                appearance. 'plain' just renders the message without extra
                HTML tags. Good for API response.
        """
        is_error = 400 <= code < 600
        if is_error:
            webapp.RequestHandler.error(self, code)
        else:
            self.response.set_status(code)
        if not message and not message_html:
            # Default to the standard reason phrase for the status code.
            message = '%d: %s' % (code, httplib.responses.get(code))
        if style == 'plain':
            self.__render_plain_message(message, message_html)
        else:
            try:
                self.render('message.html', cls=style,
                            message=message, message_html=message_html)
            except:
                # NOTE(review): bare except — falls back to plain output if
                # template rendering fails for any reason.
                self.__render_plain_message(message, message_html)
        self.terminate_response()
    def __render_plain_message(self, message, message_html):
        """Writes the escaped plain message followed by the raw HTML message,
        separated by a paragraph break when both are present."""
        self.response.out.write(
            django.utils.html.escape(message) +
            ('<p>' if message and message_html else '') +
            message_html)
def terminate_response(self):
"""Prevents any further output from being written."""
self.response.out.write = lambda *args: None
self.get = lambda *args: None
self.post = lambda *args: None
    def write(self, text):
        """Sends text to the client using the charset from select_charset().

        Unencodable characters are replaced rather than raising.
        """
        self.response.out.write(text.encode(self.env.charset, 'replace'))
    def get_url(self, action, repo=None, scheme=None, **params):
        """Constructs the absolute URL for a given action and query parameters,
        preserving the current repo and the parameters listed in
        PRESERVED_QUERY_PARAM_NAMES. Delegates to the module-level get_url()."""
        return get_url(self.request, repo or self.env.repo, action,
                       charset=self.env.charset, scheme=scheme, **params)
    @staticmethod
    def add_task_for_repo(repo, name, action, **kwargs):
        """Queues up a task for an individual repository.

        The task name embeds a millisecond timestamp to keep it unique.
        """
        task_name = '%s-%s-%s' % (repo, name, int(time.time()*1000))
        path = '/%s/%s' % (repo, action)
        taskqueue.add(name=task_name, method='GET', url=path, params=kwargs)
    def send_mail(self, to, subject, body):
        """Sends e-mail using a sender address that's allowed for this app.

        Delivery is asynchronous: the message is enqueued on the 'send-mail'
        task queue rather than sent inline.
        """
        app_id = get_app_name()
        sender = 'Do not reply <do-not-reply@%s.%s>' % (app_id, EMAIL_DOMAIN)
        logging.info('Add mail task: recipient %r, subject %r' % (to, subject))
        taskqueue.add(queue_name='send-mail', url='/global/admin/send_mail',
                      params={'sender': sender,
                              'to': to,
                              'subject': subject,
                              'body': body})
    def get_captcha_html(self, error_code=None, use_ssl=False):
        """Generates the necessary HTML to display a CAPTCHA validation box."""
        # We use the 'custom_translations' parameter for UI messages, whereas
        # the 'lang' parameter controls the language of the challenge itself.
        # reCAPTCHA falls back to 'en' if this parameter isn't recognized.
        lang = self.env.lang.split('-')[0]
        return captcha.get_display_html(
            site_key=config.get('captcha_site_key'),
            use_ssl=use_ssl, error=error_code, lang=lang
        )
    def get_captcha_response(self):
        """Returns an object containing the CAPTCHA response information for the
        given request's CAPTCHA field information."""
        # Allows faking the CAPTCHA response by an HTTP request parameter, but
        # only locally (loopback client), for testing purpose.
        faked_captcha_response = self.request.get('faked_captcha_response')
        if faked_captcha_response and self.request.remote_addr == '127.0.0.1':
            return captcha.RecaptchaResponse(
                is_valid=faked_captcha_response == 'success')
        captcha_response = self.request.get('g-recaptcha-response')
        return captcha.submit(captcha_response)
    def handle_exception(self, exception, debug_mode):
        """Last-resort handler: log the full traceback and show a generic
        localized 500 error page."""
        logging.error(traceback.format_exc())
        self.error(500, _(
            'There was an error processing your request. Sorry for the '
            'inconvenience. Our administrators will investigate the source '
            'of the problem, but please check that the format of your '
            'request is correct.'))
def __get_env_language_for_babel(self):
language_code = self.env.lang
try:
return babel.Locale.parse(language_code, sep='-')
except babel.UnknownLocaleError as e:
# fallback language
return babel.Locale('en')
def to_local_time(self, date):
    """Converts a datetime object to the local time configured for the
    current repository. For convenience, returns None if date is None.

    Args:
        date: a naive (assumed-UTC) datetime, or None.

    Returns:
        The shifted datetime; the original datetime when no offset is
        configured; or None when date is None.
    """
    # TODO(kpy): This only works for repositories that have a single fixed
    # time zone offset and never use Daylight Saving Time.
    if date:
        if self.config.time_zone_offset:
            # timedelta(hours=...) is equivalent to the former
            # timedelta(0, 3600 * offset) but states the unit directly.
            return date + timedelta(hours=self.config.time_zone_offset)
    return date
def format_datetime_localized(self, dt):
    """Formats a datetime object to a localized human-readable string based
    on the current locale."""
    # Stray trailing semicolon removed (not idiomatic Python).
    return format_datetime(dt, locale=self.__get_env_language_for_babel())
def format_date_localized(self, dt):
    """Formats a datetime object to a localized human-readable string based
    on the current locale containing only the date."""
    # Stray trailing semicolon removed (not idiomatic Python).
    return format_date(dt, locale=self.__get_env_language_for_babel())
def format_time_localized(self, dt):
    """Formats a datetime object to a localized human-readable string based
    on the current locale containing only the time."""
    # Stray trailing semicolon removed (not idiomatic Python).
    return format_time(dt, locale=self.__get_env_language_for_babel())
def to_formatted_local_datetime(self, dt):
    """Converts a datetime object to the local datetime configured for the
    current repository and formats to a localized human-readable string
    based on the current locale."""
    # Shift to repository-local time, then localize the rendering.
    return self.format_datetime_localized(self.to_local_time(dt))
def to_formatted_local_date(self, dt):
    """Converts a datetime object to the local date configured for the
    current repository and formats to a localized human-readable string
    based on the current locale."""
    # Shift to repository-local time, then localize the rendering.
    return self.format_date_localized(self.to_local_time(dt))
def to_formatted_local_time(self, dt):
    """Converts a datetime object to the local time configured for the
    current repository and formats to a localized human-readable string
    based on the current locale."""
    # Shift to repository-local time, then localize the rendering.
    return self.format_time_localized(self.to_local_time(dt))
def maybe_redirect_for_repo_alias(self, request):
    """If the specified repository name is an alias, redirects to the URL
    with the canonical repository name and returns True. Otherwise returns
    False.
    """
    # Config repo_alias is a dictionary from a repository name alias to
    # its canonical.
    # e.g., {'yol': '2013-yolanda', 'jam': '2014-jammu-kashmir-floods'}
    #
    # A repository name alias can be used instead of the canonical
    # repository name in URLs. This is especially useful combined with
    # the short URL. e.g., You can access
    # https://www.google.org/personfinder/2014-jammu-kashmir-floods
    # by https://g.co/pf/jam .
    if not self.repo:
        # No repository in the URL, so there is nothing to alias.
        return False
    repo_aliases = config.get('repo_aliases', default={})
    if self.repo in repo_aliases:
        canonical_repo = repo_aliases[self.repo]
        # Carry every query parameter over to the redirect target.
        params = {}
        for name in request.arguments():
            params[name] = request.get(name)
        # Redirects to the same URL including the query parameters, except
        # for the repository name.
        self.redirect('/' + self.env.action, repo=canonical_repo, **params)
        # Stop further handling; the redirect response has been set.
        self.terminate_response()
        return True
    else:
        return False
def should_show_inline_photo(self, photo_url):
    """Returns True if we should show the photo in our site directly with
    <img> tag. In zero-rating mode, it returns True only if the photo is
    served by our domain, to avoid loading resources in other domains in
    Person Finder.
    See "Zero-rating" section of the admin page
    (app/resources/admin.html.template) for details of zero-rating mode.
    """
    if not photo_url:
        return False
    if not self.env.config.zero_rating_mode:
        # Not zero-rated: any non-empty photo URL may be inlined.
        return True
    # Zero-rated: inline only photos served from our own host.
    # urlsplit returns (scheme, netloc, path, query, fragment);
    # compare the netloc (host[:port]) components.
    our_netloc = urlparse.urlsplit(self.request.url)[1]
    photo_netloc = urlparse.urlsplit(photo_url)[1]
    return photo_netloc == our_netloc
# Index of the query-string component in the 6-tuple returned by
# urlparse.urlparse (scheme, netloc, path, params, query, fragment).
URL_PARSE_QUERY_INDEX = 4

def get_thumbnail_url(self, photo_url):
    """Get a thumbnail URL for an uploaded photo's URL.

    Args:
        photo_url: a photo URL for an uploaded photo

    Returns:
        The same URL with 'thumb=true' added to (or replacing the value
        in) its query string, or None when photo_url is empty.
    """
    if not photo_url:
        return None
    # Parse, rewrite the query component in place, then re-assemble.
    parsed_url = list(urlparse.urlparse(photo_url))
    params_dict = dict(urlparse.parse_qsl(
        parsed_url[BaseHandler.URL_PARSE_QUERY_INDEX]))
    params_dict['thumb'] = 'true'
    parsed_url[BaseHandler.URL_PARSE_QUERY_INDEX] = urllib.urlencode(
        params_dict)
    return urlparse.urlunparse(parsed_url)
def __return_unimplemented_method_error(self):
    """Responds with HTTP 405 for a verb this handler does not implement."""
    message = (
        'HTTP method %s is not allowed for this URL.' % self.request.method)
    return self.error(405, message)
def __init__(self, request, response, env):
    """Initializes the handler and performs all common request checks.

    Runs, in order: parameter validation, referrer whitelisting, HTTPS
    enforcement, repository-alias redirection, authorization-key lookup,
    admin login enforcement, repository existence check, and repository
    deactivation handling.  Several branches terminate the response
    early; the ordering of these checks is significant.
    """
    webapp.RequestHandler.__init__(self, request, response)
    self.params = Struct()
    self.env = env
    self.repo = env.repo
    self.config = env.config
    self.charset = env.charset
    # Set default Content-Type header.
    self.response.headers['Content-Type'] = (
        'text/html; charset=%s' % self.charset)
    if self.admin_required:
        # Admin pages refuse to render inside third-party frames.
        self.response.headers['X-Frame-Options'] = 'SAMEORIGIN'
    # Validate query parameters.
    for name, validator in self.auto_params.items():
        try:
            value = self.request.get(name, '')
            setattr(self.params, name, validator(value))
        except Exception, e:
            # Fall back to the validator's default before reporting the
            # bad parameter, so later code can still read self.params.
            setattr(self.params, name, validator(None))
            return self.error(400, 'Invalid parameter %s: %s' % (name, e))
    # Ensure referrer is in whitelist, if it exists
    if self.params.referrer and (not self.params.referrer in
        self.config.referrer_whitelist):
        setattr(self.params, 'referrer', '')
    # Check for SSL (unless running local dev app server).
    if self.https_required and not is_dev_app_server():
        if self.env.scheme != 'https':
            return self.error(403, 'HTTPS is required.')
    # Handles repository alias.
    if self.maybe_redirect_for_repo_alias(request):
        return
    # Check for an authorization key.
    self.auth = None
    if self.params.key:
        if self.repo:
            # check for domain specific one.
            self.auth = model.Authorization.get(self.repo, self.params.key)
        if not self.auth:
            # perhaps this is a global key ('*' for consistency with config).
            self.auth = model.Authorization.get('*', self.params.key)
    if self.auth and not self.auth.is_valid:
        self.auth = None
    # Shows a custom error page here when the user is not an admin
    # instead of "login: admin" in app.yaml
    # If we use it, user can't sign out
    # because the error page of "login: admin" doesn't have sign-out link.
    if self.admin_required:
        user = users.get_current_user()
        if not user:
            login_url = users.create_login_url(self.request.url)
            webapp.RequestHandler.redirect(self, login_url)
            self.terminate_response()
            return
        if not users.is_current_user_admin():
            logout_url = users.create_logout_url(self.request.url)
            self.render('not_admin_error.html', logout_url=logout_url, user=user)
            self.terminate_response()
            return
    # Handlers that don't need a repository configuration can skip it.
    if not self.repo:
        if self.repo_required:
            return self.error(400, 'No repository specified.')
        return
    # Everything after this requires a repo.
    # Reject requests for repositories that don't exist.
    if not model.Repo.get_by_key_name(self.repo):
        html = 'No such repository. '
        if self.env.repo_options:
            html += 'Select:<p>' + self.render_to_string('repo-menu.html')
        return self.error(404, message_html=html)
    # If this repository has been deactivated, terminate with a message.
    # The ignore_deactivation flag is for admin pages that bypass this.
    if self.config.deactivated and not self.ignore_deactivation:
        self.env.language_menu = []
        self.env.robots_ok = True
        self.render('message.html', cls='deactivation',
            message_html=self.config.deactivation_message_html)
        self.terminate_response()
# Default HTTP verb handlers.  Subclasses override only the verbs they
# support; any verb left unimplemented answers "405 Method Not Allowed".
def get(self, *args):
    """Default handler implementation which returns HTTP status 405."""
    return self.__return_unimplemented_method_error()

def post(self, *args):
    """Default handler implementation which returns HTTP status 405."""
    return self.__return_unimplemented_method_error()

def put(self, *args):
    """Default handler implementation which returns HTTP status 405."""
    return self.__return_unimplemented_method_error()

def head(self, *args):
    """Default handler implementation which returns HTTP status 405."""
    return self.__return_unimplemented_method_error()

def options(self, *args):
    """Default handler implementation which returns HTTP status 405."""
    return self.__return_unimplemented_method_error()

def delete(self, *args):
    """Default handler implementation which returns HTTP status 405."""
    return self.__return_unimplemented_method_error()

def trace(self, *args):
    """Default handler implementation which returns HTTP status 405."""
    return self.__return_unimplemented_method_error()
# ==== XSRF protection =========================================================
class XsrfTool(object):
    """Generates and verifies XSRF tokens bound to a user and an action.

    A token has the form "<hex HMAC digest>/<timestamp>", where the
    digest covers the user ID, the action ID, and the issue time.
    """

    # XSRF tokens expire after 4 hours.
    TOKEN_EXPIRATION_TIME = 60 * 60 * 4

    def __init__(self):
        configured_key = config.get('xsrf_token_key')
        if configured_key:
            # config.get returns unicode, but hmac is going to want a str
            self._key = configured_key.encode('utf-8')
        else:
            # First use: create and persist a random signing key.
            configured_key = generate_random_key(20)
            config.set(xsrf_token_key=configured_key)
            self._key = configured_key

    def generate_token(self, user_id, action_id):
        """Returns a fresh token tying user_id/action_id to the current time."""
        action_time = get_utcnow_timestamp()
        return '%s/%f' % (
            self._generate_hmac_digest(user_id, action_id, action_time),
            action_time)

    def verify_token(self, token, user_id, action_id):
        """Returns True iff token is well-formed, unexpired, and matches
        user_id/action_id.

        Fix: a malformed token (wrong number of '/'-separated fields, or
        a non-numeric timestamp) now returns False instead of raising
        ValueError from split()/float().
        """
        parts = token.split('/')
        if len(parts) != 2:
            return False
        hmac_digest, action_time_str = parts
        try:
            action_time = float(action_time_str)
        except ValueError:
            return False
        if (action_time + XsrfTool.TOKEN_EXPIRATION_TIME <
                get_utcnow_timestamp()):
            return False
        expected_hmac_digest = self._generate_hmac_digest(
            user_id, action_id, action_time)
        # Constant-time comparison to avoid timing side channels.
        return hmac.compare_digest(
            hmac_digest.encode('utf-8'), expected_hmac_digest)

    def _generate_hmac_digest(self, user_id, action_id, action_time):
        # NOTE(review): hmac.new with no digestmod defaults to MD5 on
        # Python 2 (Python 3 requires an explicit digestmod).  Left
        # unchanged so previously issued tokens stay valid.
        hmac_obj = hmac.new(
            self._key, '%s/%s/%f' % (user_id, action_id, action_time))
        return hmac_obj.hexdigest()
| |
# yellowbrick.text.dispersion
# Implementations of lexical dispersions for text visualization.
#
# Author: Larry Gray
# Created: 2018-06-21 10:06
#
# Copyright (C) 2018 District Data Labs
# For license information, see LICENSE.txt
#
# ID: dispersion.py [] lwgray@gmail.com $
"""
Implementation of lexical dispersion for text visualization
"""
##########################################################################
## Imports
##########################################################################
from collections import defaultdict
import itertools
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
import numpy as np
##########################################################################
## Dispersion Plot Visualizer
##########################################################################
class DispersionPlot(TextVisualizer):
    """
    DispersionPlotVisualizer allows for visualization of the lexical dispersion
    of words in a corpus. Lexical dispersion is a measure of a word's
    homogeneity across the parts of a corpus. This plot notes the occurrences
    of a word and how many words from the beginning it appears.

    Parameters
    ----------
    target_words : list
        A list of target words whose dispersion across a corpus passed at fit
        will be visualized.

    ax : matplotlib axes, default: None
        The axes to plot the figure on.

    labels : list of strings
        The names of the classes in the target, used to create a legend.
        Labels must match names of classes in sorted order.

    colors : list or tuple of colors
        Specify the colors for each individual class

    colormap : string or matplotlib cmap
        Qualitative colormap for discrete target

    ignore_case : boolean, default: False
        Specify whether input will be case-sensitive.

    annotate_docs : boolean, default: False
        Specify whether document boundaries will be displayed. Vertical lines
        are positioned at the end of each document.

    kwargs : dict
        Pass any additional keyword arguments to the super class.

    These parameters can be influenced later on in the visualization
    process, but can and should be set as early as possible.
    """

    # NOTE: cannot be np.nan
    NULL_CLASS = None

    def __init__(self, target_words, ax=None, colors=None, ignore_case=False,
                 annotate_docs=False, labels=None, colormap=None, **kwargs):
        super(DispersionPlot, self).__init__(ax=ax, **kwargs)

        self.labels = labels
        self.colors = colors
        self.colormap = colormap

        self.target_words = target_words
        self.ignore_case = ignore_case
        self.annotate_docs = annotate_docs

    def _compute_dispersion(self, text, y):
        """Yields (offset, word index, target) triples for every occurrence
        of a target word in the corpus, recording document boundaries in
        self.boundaries_ when annotate_docs is set."""
        self.boundaries_ = []
        offset = 0

        if y is None:
            y = itertools.repeat(None)

        for doc, target in zip(text, y):
            for word in doc:
                if self.ignore_case:
                    word = word.lower()

                # NOTE: this will find all indices if duplicate words are supplied
                # In the case that word is not in target words, any empty list is
                # returned and no data will be yielded
                offset += 1
                for y_coord in (self.indexed_words_ == word).nonzero()[0]:
                    y_coord = int(y_coord)
                    yield (offset, y_coord, target)
            if self.annotate_docs:
                self.boundaries_.append(offset)
        self.boundaries_ = np.array(self.boundaries_, dtype=int)

    def _check_missing_words(self, points):
        """Raises YellowbrickValueError if any target word never occurs in
        the corpus (i.e. its index is absent from the points array)."""
        for index in range(len(self.indexed_words_)):
            # Idiom fix: direct negative test instead of if/pass/else.
            if index not in points[:, 1]:
                raise YellowbrickValueError((
                    "The indexed word '{}' is not found in "
                    "this corpus"
                ).format(self.indexed_words_[index]))

    def fit(self, X, y=None, **kwargs):
        """
        The fit method is the primary drawing input for the dispersion
        visualization.

        Parameters
        ----------
        X : list or generator
            Should be provided as a list of documents or a generator
            that yields a list of documents that contain a list of
            words in the order they appear in the document.

        y : ndarray or Series of length n
            An optional array or series of target or class values for
            instances. If this is specified, then the points will be colored
            according to their class.

        kwargs : dict
            Pass generic arguments to the drawing method

        Returns
        -------
        self : instance
            Returns the instance of the transformer/visualizer
        """
        if y is not None:
            self.classes_ = np.unique(y)
        elif y is None and self.labels is not None:
            self.classes_ = np.array([self.labels[0]])
        else:
            self.classes_ = np.array([self.NULL_CLASS])

        # Create an index (e.g. the y position) for the target words
        self.indexed_words_ = np.flip(self.target_words, axis=0)
        if self.ignore_case:
            self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_])

        # Stack is used to create a 2D array from the generator
        try:
            points_target = np.stack(self._compute_dispersion(X, y))
        except ValueError:
            raise YellowbrickValueError((
                "No indexed words were found in the corpus"
            ))
        points = np.stack(zip(points_target[:, 0].astype(int),
                              points_target[:, 1].astype(int)))
        self.target = points_target[:, 2]

        self._check_missing_words(points)

        self.draw(points, self.target)
        return self

    def draw(self, points, target=None, **kwargs):
        """
        Called from the fit method, this method creates the canvas and
        draws the plot on it.

        Parameters
        ----------
        kwargs: generic keyword arguments.
        """
        # Resolve the labels with the classes
        labels = self.labels if self.labels is not None else self.classes_
        if len(labels) != len(self.classes_):
            raise YellowbrickValueError((
                "number of supplied labels ({}) does not "
                "match the number of classes ({})"
            ).format(len(labels), len(self.classes_)))

        # Create the color mapping for the labels.
        # BUG FIX: __init__ stores the palette as self.colors; the old
        # code read the nonexistent attribute self.color here.
        color_values = resolve_colors(
            n_colors=len(labels), colormap=self.colormap, colors=self.colors)
        colors = dict(zip(labels, color_values))

        # Transform labels into a map of class to label
        labels = dict(zip(self.classes_, labels))

        # Define boundaries with a vertical line
        if self.annotate_docs:
            for xcoords in self.boundaries_:
                self.ax.axvline(x=xcoords, color='lightgray', linestyle='dashed')

        series = defaultdict(lambda: {'x': [], 'y': []})

        if target is not None:
            for point, t in zip(points, target):
                label = labels[t]
                series[label]['x'].append(point[0])
                series[label]['y'].append(point[1])
        else:
            label = self.classes_[0]
            for x, y in points:
                series[label]['x'].append(x)
                series[label]['y'].append(y)

        # Renamed loop variable so the 'points' parameter is not shadowed.
        for label, coords in series.items():
            self.ax.scatter(coords['x'], coords['y'], marker='|',
                            c=colors[label], zorder=100, label=label)

        self.ax.set_yticks(list(range(len(self.indexed_words_))))
        self.ax.set_yticklabels(self.indexed_words_)

    def finalize(self, **kwargs):
        """
        The finalize method executes any subclass-specific axes
        finalization steps. The user calls poof & poof calls finalize.

        Parameters
        ----------
        kwargs: generic keyword arguments.
        """
        self.ax.set_ylim(-1, len(self.indexed_words_))
        self.ax.set_title("Lexical Dispersion Plot")
        self.ax.set_xlabel("Word Offset")
        self.ax.grid(False)

        # Add the legend outside of the figure box.
        if not all(self.classes_ == np.array([self.NULL_CLASS])):
            box = self.ax.get_position()
            self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
            self.ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
##########################################################################
## Quick Method
##########################################################################
def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None,
               labels=None, annotate_docs=False, ignore_case=False, **kwargs):
    """ Displays lexical dispersion plot for words in a corpus

    This helper function is a quick wrapper to utilize the DispersionPlot
    Visualizer for one-off analysis

    Parameters
    ----------

    words : list
        A list of words whose dispersion will be examined within a corpus

    y : ndarray or Series of length n
        An optional array or series of target or class values for
        instances. If this is specified, then the points will be colored
        according to their class.

    corpus : list
        Should be provided as a list of documents that contain
        a list of words in the order they appear in the document.

    ax : matplotlib axes, default: None
        The axes to plot the figure on.

    labels : list of strings
        The names of the classes in the target, used to create a legend.
        Labels must match names of classes in sorted order.

    colors : list or tuple of colors
        Specify the colors for each individual class

    colormap : string or matplotlib cmap
        Qualitative colormap for discrete target

    annotate_docs : boolean, default: False
        Specify whether document boundaries will be displayed.  Vertical lines
        are positioned at the end of each document.

    ignore_case : boolean, default: False
        Specify whether input  will be case-sensitive.

    kwargs : dict
        Pass any additional keyword arguments to the super class.

    Returns
    -------
    ax: matplotlib axes
        Returns the axes that the plot was drawn on
    """
    # Instantiate the visualizer
    visualizer = DispersionPlot(
        words, ax=ax, colors=colors, colormap=colormap,
        ignore_case=ignore_case, labels=labels,
        annotate_docs=annotate_docs, **kwargs
    )

    # Fit and transform the visualizer (calls draw)
    visualizer.fit(corpus, y, **kwargs)

    # Return the axes object on the visualizer
    return visualizer.ax
| |
"""Tests related to embargoes of registrations"""
import datetime
import httplib as http
import json
from modularodm import Q
from modularodm.exceptions import ValidationValueError
import mock
from nose.tools import * # noqa
from tests.base import fake, OsfTestCase
from tests.factories import (
AuthUserFactory, EmbargoFactory, NodeFactory, ProjectFactory,
RegistrationFactory, UserFactory, UnconfirmedUserFactory, DraftRegistrationFactory
)
from tests import utils
from framework.exceptions import PermissionsError
from framework.auth import Auth
from website.exceptions import (
InvalidSanctionRejectionToken, InvalidSanctionApprovalToken, NodeStateError,
)
from website import tokens
from website.models import Embargo, Node, User
from website.project.model import ensure_schemas
from website.project.sanctions import PreregCallbackMixin
from website.util import permissions
from website.exceptions import NodeStateError
# A syntactically valid encoded token with meaningless content, for tests
# that only need *some* token value rather than a specific one.
DUMMY_TOKEN = tokens.encode({
    'dummy': 'token'
})
class RegistrationEmbargoModelsTestCase(OsfTestCase):
def setUp(self):
super(RegistrationEmbargoModelsTestCase, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user)
self.registration = RegistrationFactory(project=self.project)
self.embargo = EmbargoFactory(user=self.user)
self.valid_embargo_end_date = datetime.datetime.utcnow() + datetime.timedelta(days=3)
# Node#_initiate_embargo tests
def test__initiate_embargo_saves_embargo(self):
initial_count = Embargo.find().count()
self.registration._initiate_embargo(
self.user,
self.valid_embargo_end_date,
for_existing_registration=True
)
assert_equal(Embargo.find().count(), initial_count + 1)
def test_state_can_be_set_to_complete(self):
embargo = EmbargoFactory()
embargo.state = Embargo.COMPLETED
embargo.save() # should pass validation
assert_equal(embargo.state, Embargo.COMPLETED)
def test__initiate_embargo_does_not_create_tokens_for_unregistered_admin(self):
unconfirmed_user = UnconfirmedUserFactory()
self.registration.contributors.append(unconfirmed_user)
self.registration.add_permission(unconfirmed_user, 'admin', save=True)
assert_true(self.registration.has_permission(unconfirmed_user, 'admin'))
embargo = self.registration._initiate_embargo(
self.user,
self.valid_embargo_end_date,
for_existing_registration=True
)
assert_true(self.user._id in embargo.approval_state)
assert_false(unconfirmed_user._id in embargo.approval_state)
def test__initiate_embargo_adds_admins_on_child_nodes(self):
project_admin = UserFactory()
project_non_admin = UserFactory()
child_admin = UserFactory()
child_non_admin = UserFactory()
grandchild_admin = UserFactory()
project = ProjectFactory(creator=project_admin)
project.add_contributor(project_non_admin, auth=Auth(project.creator), save=True)
child = NodeFactory(creator=child_admin, parent=project)
child.add_contributor(child_non_admin, auth=Auth(project.creator), save=True)
grandchild = NodeFactory(creator=grandchild_admin, parent=child) # noqa
embargo = project._initiate_embargo(
project.creator,
self.valid_embargo_end_date,
for_existing_registration=True
)
assert_in(project_admin._id, embargo.approval_state)
assert_in(child_admin._id, embargo.approval_state)
assert_in(grandchild_admin._id, embargo.approval_state)
assert_not_in(project_non_admin._id, embargo.approval_state)
assert_not_in(child_non_admin._id, embargo.approval_state)
def test__initiate_embargo_with_save_does_save_embargo(self):
initial_count = Embargo.find().count()
self.registration._initiate_embargo(
self.user,
self.valid_embargo_end_date,
for_existing_registration=True,
)
assert_equal(Embargo.find().count(), initial_count + 1)
# Node#embargo_registration tests
def test_embargo_from_non_admin_raises_PermissionsError(self):
self.registration.remove_permission(self.user, 'admin')
self.registration.save()
self.registration.reload()
with assert_raises(PermissionsError):
self.registration.embargo_registration(self.user, self.valid_embargo_end_date)
def test_embargo_end_date_in_past_raises_ValidationValueError(self):
with assert_raises(ValidationValueError):
self.registration.embargo_registration(
self.user,
datetime.datetime(1999, 1, 1)
)
def test_embargo_end_date_today_raises_ValidationValueError(self):
with assert_raises(ValidationValueError):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow()
)
def test_embargo_end_date_in_far_future_raises_ValidationValueError(self):
with assert_raises(ValidationValueError):
self.registration.embargo_registration(
self.user,
datetime.datetime(2099, 1, 1)
)
def test_embargo_with_valid_end_date_starts_pending_embargo(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
def test_embargo_public_project_makes_private_pending_embargo(self):
self.registration.is_public = True
assert_true(self.registration.is_public)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
assert_false(self.registration.is_public)
def test_embargo_non_registration_raises_NodeStateError(self):
self.registration.is_registration = False
self.registration.save()
with assert_raises(NodeStateError):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
assert_false(self.registration.is_pending_embargo)
# Embargo#approve_embargo tests
def test_invalid_approval_token_raises_InvalidSanctionApprovalToken(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
invalid_approval_token = 'not a real token'
with assert_raises(InvalidSanctionApprovalToken):
self.registration.embargo.approve_embargo(self.user, invalid_approval_token)
assert_true(self.registration.is_pending_embargo)
def test_non_admin_approval_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
with assert_raises(PermissionsError):
self.registration.embargo.approve_embargo(non_admin, approval_token)
assert_true(self.registration.is_pending_embargo)
def test_one_approval_with_one_admin_embargoes(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, approval_token)
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.is_pending_embargo)
def test_approval_adds_to_parent_projects_log(self):
initial_project_logs = len(self.registration.registered_from.logs)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, approval_token)
# Logs: Created, registered, embargo initiated, embargo approved
assert_equal(len(self.registration.registered_from.logs), initial_project_logs + 2)
def test_one_approval_with_two_admins_stays_pending(self):
admin2 = UserFactory()
self.registration.contributors.append(admin2)
self.registration.add_permission(admin2, 'admin', save=True)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
# First admin approves
approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
self.registration.embargo.approve_embargo(self.user, approval_token)
assert_true(self.registration.is_pending_embargo)
num_of_approvals = sum([val['has_approved'] for val in self.registration.embargo.approval_state.values()])
assert_equal(num_of_approvals, 1)
# Second admin approves
approval_token = self.registration.embargo.approval_state[admin2._id]['approval_token']
self.registration.embargo.approve_embargo(admin2, approval_token)
assert_true(self.registration.embargo_end_date)
assert_false(self.registration.is_pending_embargo)
num_of_approvals = sum([val['has_approved'] for val in self.registration.embargo.approval_state.values()])
assert_equal(num_of_approvals, 2)
# Embargo#disapprove_embargo tests
def test_invalid_rejection_token_raises_InvalidSanctionRejectionToken(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
with assert_raises(InvalidSanctionRejectionToken):
self.registration.embargo.disapprove_embargo(self.user, fake.sentence())
assert_true(self.registration.is_pending_embargo)
def test_non_admin_rejection_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
with assert_raises(PermissionsError):
self.registration.embargo.disapprove_embargo(non_admin, rejection_token)
assert_true(self.registration.is_pending_embargo)
def test_one_disapproval_cancels_embargo(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
assert_true(self.registration.is_pending_embargo)
rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
self.registration.embargo.disapprove_embargo(self.user, rejection_token)
assert_equal(self.registration.embargo.state, Embargo.REJECTED)
assert_false(self.registration.is_pending_embargo)
def test_disapproval_adds_to_parent_projects_log(self):
initial_project_logs = len(self.registration.registered_from.logs)
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
registered_from = self.registration.registered_from
self.registration.embargo.disapprove_embargo(self.user, rejection_token)
# Logs: Created, registered, embargo initiated, embargo cancelled
assert_equal(len(registered_from.logs), initial_project_logs + 2)
def test_cancelling_embargo_deletes_parent_registration(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
self.registration.save()
rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
self.registration.embargo.disapprove_embargo(self.user, rejection_token)
assert_equal(self.registration.embargo.state, Embargo.REJECTED)
assert_true(self.registration.is_deleted)
def test_cancelling_embargo_deletes_component_registrations(self):
component = NodeFactory(
creator=self.user,
parent=self.project,
title='Component'
)
subcomponent = NodeFactory(
creator=self.user,
parent=component,
title='Subcomponent'
)
project_registration = RegistrationFactory(project=self.project)
component_registration = project_registration.nodes[0]
subcomponent_registration = component_registration.nodes[0]
project_registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10)
)
project_registration.save()
rejection_token = project_registration.embargo.approval_state[self.user._id]['rejection_token']
project_registration.embargo.disapprove_embargo(self.user, rejection_token)
assert_equal(project_registration.embargo.state, Embargo.REJECTED)
assert_true(project_registration.is_deleted)
assert_true(component_registration.is_deleted)
assert_true(subcomponent_registration.is_deleted)
def test_cancelling_embargo_for_existing_registration_does_not_delete_registration(self):
self.registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10),
for_existing_registration=True
)
self.registration.save()
rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
self.registration.embargo.disapprove_embargo(self.user, rejection_token)
assert_equal(self.registration.embargo.state, Embargo.REJECTED)
assert_false(self.registration.is_deleted)
def test_rejecting_embargo_for_existing_registration_does_not_deleted_component_registrations(self):
component = NodeFactory(
creator=self.user,
parent=self.project,
title='Component'
)
subcomponent = NodeFactory(
creator=self.user,
parent=component,
title='Subcomponent'
)
project_registration = RegistrationFactory(project=self.project)
component_registration = project_registration.nodes[0]
subcomponent_registration = component_registration.nodes[0]
project_registration.embargo_registration(
self.user,
datetime.datetime.utcnow() + datetime.timedelta(days=10),
for_existing_registration=True
)
rejection_token = project_registration.embargo.approval_state[self.user._id]['rejection_token']
project_registration.embargo.disapprove_embargo(self.user, rejection_token)
project_registration.save()
assert_equal(project_registration.embargo.state, Embargo.REJECTED)
assert_false(project_registration.is_deleted)
assert_false(component_registration.is_deleted)
assert_false(subcomponent_registration.is_deleted)
# Embargo property tests
def test_new_registration_is_pending_registration(self):
    """Embargo initiated WITHOUT for_existing_registration reports True for
    is_pending_embargo_for_existing_registration."""
    self.registration.embargo_registration(
        self.user,
        datetime.datetime.utcnow() + datetime.timedelta(days=10)
    )
    self.registration.save()
    # NOTE(review): despite its name, the property is asserted True for a NEW
    # registration here and False for an existing one in the companion test
    # below — confirm the intended polarity against the model property.
    assert_true(self.registration.is_pending_embargo_for_existing_registration)
def test_existing_registration_is_not_pending_registration(self):
    """Embargo initiated WITH for_existing_registration=True reports False
    for is_pending_embargo_for_existing_registration."""
    self.registration.embargo_registration(
        self.user,
        datetime.datetime.utcnow() + datetime.timedelta(days=10),
        for_existing_registration=True
    )
    self.registration.save()
    assert_false(self.registration.is_pending_embargo_for_existing_registration)
def test_on_complete_notify_initiator(self):
    """With notify_initiator_on_complete set, completing the embargo must
    call the initiator-notification hook exactly once."""
    self.registration.embargo_registration(
        self.user,
        datetime.datetime.utcnow() + datetime.timedelta(days=10),
        notify_initiator_on_complete=True
    )
    self.registration.save()
    # Patch the notification hook and drive the sanction's completion callback
    # directly rather than waiting for approval flow.
    with mock.patch.object(PreregCallbackMixin, '_notify_initiator') as mock_notify:
        self.registration.embargo._on_complete(self.user)
    assert_equal(mock_notify.call_count, 1)
class RegistrationWithChildNodesEmbargoModelTestCase(OsfTestCase):
    """Model-level tests that embargo approval/disapproval on a root
    registration cascades to all registered descendant nodes."""
    def setUp(self):
        super(RegistrationWithChildNodesEmbargoModelTestCase, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth
        self.valid_embargo_end_date = datetime.datetime.utcnow() + datetime.timedelta(days=3)
        # Build a node tree: Root -> {Component, Subproject -> Subcomponent},
        # then register the whole tree from the root.
        self.project = ProjectFactory(title='Root', is_public=False, creator=self.user)
        self.component = NodeFactory(
            creator=self.user,
            parent=self.project,
            title='Component'
        )
        self.subproject = ProjectFactory(
            creator=self.user,
            parent=self.project,
            title='Subproject'
        )
        self.subproject_component = NodeFactory(
            creator=self.user,
            parent=self.subproject,
            title='Subcomponent'
        )
        self.registration = RegistrationFactory(project=self.project)
        # Reload the registration; else tests won't catch failures to save
        self.registration.reload()
    def test_approval_embargoes_descendant_nodes(self):
        """Approving the root embargo must set an embargo end date on every
        descendant registration node."""
        # Initiate embargo for parent registration
        self.registration.embargo_registration(
            self.user,
            self.valid_embargo_end_date
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        # Ensure descendant nodes are pending embargo
        descendants = self.registration.get_descendants_recursive()
        for node in descendants:
            assert_true(node.is_pending_embargo)
        # Approve parent registration's embargo
        approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
        self.registration.embargo.approve_embargo(self.user, approval_token)
        assert_true(self.registration.embargo.embargo_end_date)
        # Ensure descendant nodes are in embargo
        descendants = self.registration.get_descendants_recursive()
        for node in descendants:
            assert_true(node.embargo_end_date)
    def test_disapproval_cancels_embargo_on_descendant_nodes(self):
        """Disapproving the root embargo must cancel the pending embargo on
        every descendant registration node."""
        # Initiate embargo on parent registration
        self.registration.embargo_registration(
            self.user,
            self.valid_embargo_end_date
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        # Ensure descendant nodes are pending embargo
        descendants = self.registration.get_descendants_recursive()
        for node in descendants:
            assert_true(node.is_pending_embargo)
        # Disapprove parent registration's embargo
        rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
        self.registration.embargo.disapprove_embargo(self.user, rejection_token)
        assert_false(self.registration.is_pending_embargo)
        assert_equal(self.registration.embargo.state, Embargo.REJECTED)
        # Ensure descendant nodes' embargoes are cancelled
        descendants = self.registration.get_descendants_recursive()
        for node in descendants:
            assert_false(node.is_pending_embargo)
            assert_false(node.embargo_end_date)
class RegistrationEmbargoApprovalDisapprovalViewsTestCase(OsfTestCase):
    """View tests for approving/disapproving a registration embargo via
    tokenized 'view_project' links.

    Fix: test_GET_approve_with_valid_token_redirects previously asserted
    `assert_true(mock_redirect.called_with(...))`. `called_with` is not part
    of the Mock API — accessing it auto-creates a child Mock, which is always
    truthy, so the assertion could never fail. It now uses the real
    `assert_called_with`.
    """
    def setUp(self):
        super(RegistrationEmbargoApprovalDisapprovalViewsTestCase, self).setUp()
        self.user = AuthUserFactory()
        self.registration = RegistrationFactory(creator=self.user)
    # node_registration_embargo_approve tests
    def test_GET_from_unauthorized_user_raises_HTTPForbidden(self):
        unauthorized_user = AuthUserFactory()
        res = self.app.get(
            self.registration.web_url_for('view_project', token=DUMMY_TOKEN),
            auth=unauthorized_user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
    def test_GET_approve_registration_without_embargo_raises_HTTPBad_Request(self):
        assert_false(self.registration.is_pending_embargo)
        res = self.app.get(
            self.registration.web_url_for('view_project', token=DUMMY_TOKEN),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)
    def test_GET_approve_with_invalid_token_returns_HTTPBad_Request(self):
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        res = self.app.get(
            self.registration.web_url_for('view_project', token=DUMMY_TOKEN),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)
    def test_GET_approve_with_wrong_token_returns_HTTPBad_Request(self):
        # Another admin's approval token must not work for self.user.
        admin2 = UserFactory()
        self.registration.contributors.append(admin2)
        self.registration.add_permission(admin2, 'admin', save=True)
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        wrong_approval_token = self.registration.embargo.approval_state[admin2._id]['approval_token']
        res = self.app.get(
            self.registration.web_url_for('view_project', token=wrong_approval_token),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)
    def test_GET_approve_with_wrong_admins_token_returns_HTTPBad_Request(self):
        # Same as above, but also asserts the embargo stays pending.
        admin2 = UserFactory()
        self.registration.contributors.append(admin2)
        self.registration.add_permission(admin2, 'admin', save=True)
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        wrong_approval_token = self.registration.embargo.approval_state[admin2._id]['approval_token']
        res = self.app.get(
            self.registration.web_url_for('view_project', token=wrong_approval_token),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_true(self.registration.is_pending_embargo)
        assert_equal(res.status_code, 400)
    @mock.patch('flask.redirect')
    def test_GET_approve_with_valid_token_redirects(self, mock_redirect):
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
        self.app.get(
            self.registration.web_url_for('view_project', token=approval_token),
            auth=self.user.auth,
        )
        self.registration.embargo.reload()
        assert_true(self.registration.embargo_end_date)
        assert_false(self.registration.is_pending_embargo)
        # BUG FIX: was assert_true(mock_redirect.called_with(...)), which is
        # always truthy (called_with auto-creates a child Mock). Use the real
        # assertion so a wrong redirect target actually fails the test.
        mock_redirect.assert_called_with(self.registration.web_url_for('view_project'))
    def test_GET_from_unauthorized_user_returns_HTTPForbidden(self):
        unauthorized_user = AuthUserFactory()
        res = self.app.get(
            self.registration.web_url_for('view_project', token=DUMMY_TOKEN),
            auth=unauthorized_user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
    def test_GET_disapprove_registration_without_embargo_HTTPBad_Request(self):
        assert_false(self.registration.is_pending_embargo)
        res = self.app.get(
            self.registration.web_url_for('view_project', token=DUMMY_TOKEN),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)
    def test_GET_disapprove_with_invalid_token_returns_HTTPBad_Request(self):
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        res = self.app.get(
            self.registration.web_url_for('view_project', token=DUMMY_TOKEN),
            auth=self.user.auth,
            expect_errors=True
        )
        self.registration.embargo.reload()
        assert_true(self.registration.is_pending_embargo)
        assert_equal(res.status_code, 400)
    def test_GET_disapprove_with_wrong_admins_token_returns_HTTPBad_Request(self):
        admin2 = UserFactory()
        self.registration.contributors.append(admin2)
        self.registration.add_permission(admin2, 'admin', save=True)
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        wrong_rejection_token = self.registration.embargo.approval_state[admin2._id]['rejection_token']
        res = self.app.get(
            self.registration.web_url_for('view_project', token=wrong_rejection_token),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_true(self.registration.is_pending_embargo)
        assert_equal(res.status_code, 400)
    def test_GET_disapprove_with_valid(self):
        # Disapproval via the registered_from project's URL redirects back to
        # the source project page.
        project = ProjectFactory(creator=self.user)
        registration = RegistrationFactory(project=project)
        registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        registration.save()
        assert_true(registration.is_pending_embargo)
        rejection_token = registration.embargo.approval_state[self.user._id]['rejection_token']
        res = self.app.get(
            registration.registered_from.web_url_for('view_project', token=rejection_token),
            auth=self.user.auth,
        )
        registration.embargo.reload()
        assert_equal(registration.embargo.state, Embargo.REJECTED)
        assert_false(registration.is_pending_embargo)
        assert_equal(res.status_code, 200)
        assert_equal(project.web_url_for('view_project'), res.request.path)
    def test_GET_disapprove_for_existing_registration_returns_200(self):
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10),
            for_existing_registration=True
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
        res = self.app.get(
            self.registration.web_url_for('view_project', token=rejection_token),
            auth=self.user.auth,
        )
        self.registration.embargo.reload()
        assert_equal(self.registration.embargo.state, Embargo.REJECTED)
        assert_false(self.registration.is_pending_embargo)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.registration.web_url_for('view_project'))
class RegistrationEmbargoViewsTestCase(OsfTestCase):
    """End-to-end view tests for registering drafts with/without embargo and
    for the embargo-termination flow.

    Fixes in this revision:
    - test_make_child_embargoed_registration_public_asks_all_admins_in_tree
      saved `self.registration` (an unrelated registration from setUp) after
      approving the embargo on the local `registration`; it now saves the
      object the approvals were applied to.
    - `mock_enquque` parameter typo corrected to `mock_enqueue` for
      consistency with the other patched tests.
    """
    def setUp(self):
        super(RegistrationEmbargoViewsTestCase, self).setUp()
        ensure_schemas()
        self.user = AuthUserFactory()
        self.project = ProjectFactory(creator=self.user)
        self.draft = DraftRegistrationFactory(branched_from=self.project)
        self.registration = RegistrationFactory(project=self.project, creator=self.user)
        current_month = datetime.datetime.now().strftime("%B")
        current_year = datetime.datetime.now().strftime("%Y")
        # Immediate (non-embargo) registration payload.
        self.valid_make_public_payload = json.dumps({
            u'embargoEndDate': u'Fri, 01, {month} {year} 00:00:00 GMT'.format(
                month=current_month,
                year=current_year
            ),
            u'registrationChoice': 'immediate',
            u'summary': unicode(fake.sentence())
        })
        # Embargo payload with a valid (future) end date.
        valid_date = datetime.datetime.now() + datetime.timedelta(days=180)
        self.valid_embargo_payload = json.dumps({
            u'embargoEndDate': unicode(valid_date.strftime('%a, %d, %B %Y %H:%M:%S')) + u' GMT',
            u'registrationChoice': 'embargo',
            u'summary': unicode(fake.sentence())
        })
        # Embargo payload with an end date one year in the past (invalid).
        self.invalid_embargo_date_payload = json.dumps({
            u'embargoEndDate': u"Thu, 01 {month} {year} 05:00:00 GMT".format(
                month=current_month,
                year=str(int(current_year)-1)
            ),
            u'registrationChoice': 'embargo',
            u'summary': unicode(fake.sentence())
        })
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    def test_register_draft_without_embargo_creates_registration_approval(self, mock_enqueue):
        res = self.app.post(
            self.project.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.valid_make_public_payload,
            content_type='application/json',
            auth=self.user.auth
        )
        assert_equal(res.status_code, 202)
        registration = Node.find().sort('-registered_date')[0]
        assert_true(registration.is_registration)
        assert_not_equal(registration.registration_approval, None)
    # Regression test for https://openscience.atlassian.net/browse/OSF-5039
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    def test_POST_register_make_public_immediately_creates_private_pending_registration_for_public_project(self, mock_enqueue):
        self.project.is_public = True
        self.project.save()
        component = NodeFactory(
            creator=self.user,
            parent=self.project,
            title='Component',
            is_public=True
        )
        subproject = ProjectFactory(
            creator=self.user,
            parent=self.project,
            title='Subproject',
            is_public=True
        )
        subproject_component = NodeFactory(
            creator=self.user,
            parent=subproject,
            title='Subcomponent',
            is_public=True
        )
        res = self.app.post(
            self.project.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.valid_make_public_payload,
            content_type='application/json',
            auth=self.user.auth
        )
        self.project.reload()
        assert_equal(res.status_code, 202)
        assert_equal(res.json['urls']['registrations'], self.project.web_url_for('node_registrations'))
        # Last node directly registered from self.project
        registration = Node.find(
            Q('registered_from', 'eq', self.project)
        ).sort('-registered_date')[0]
        assert_true(registration.is_registration)
        assert_false(registration.is_public)
        for node in registration.get_descendants_recursive():
            assert_true(node.is_registration)
            assert_false(node.is_public)
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    def test_POST_register_make_public_does_not_make_children_public(self, mock_enqueue):
        component = NodeFactory(
            creator=self.user,
            parent=self.project,
            title='Component'
        )
        subproject = ProjectFactory(
            creator=self.user,
            parent=self.project,
            title='Subproject'
        )
        subproject_component = NodeFactory(
            creator=self.user,
            parent=subproject,
            title='Subcomponent'
        )
        res = self.app.post(
            self.project.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.valid_make_public_payload,
            content_type='application/json',
            auth=self.user.auth
        )
        self.project.reload()
        # Last node directly registered from self.project
        registration = self.project.registrations_all[-1]
        assert_false(registration.is_public)
        for node in registration.get_descendants_recursive():
            assert_true(node.is_registration)
            assert_false(node.is_public)
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    def test_POST_register_embargo_is_not_public(self, mock_enqueue):
        res = self.app.post(
            self.project.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.valid_embargo_payload,
            content_type='application/json',
            auth=self.user.auth
        )
        assert_equal(res.status_code, 202)
        registration = Node.find().sort('-registered_date')[0]
        assert_true(registration.is_registration)
        assert_false(registration.is_public)
        assert_true(registration.is_pending_embargo_for_existing_registration)
        assert_is_not_none(registration.embargo)
    # Regression test for https://openscience.atlassian.net/browse/OSF-5071
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    def test_POST_register_embargo_does_not_make_project_or_children_public(self, mock_enqueue):
        self.project.is_public = True
        self.project.save()
        component = NodeFactory(
            creator=self.user,
            parent=self.project,
            title='Component',
            is_public=True
        )
        subproject = ProjectFactory(
            creator=self.user,
            parent=self.project,
            title='Subproject',
            is_public=True
        )
        subproject_component = NodeFactory(
            creator=self.user,
            parent=subproject,
            title='Subcomponent',
            is_public=True
        )
        res = self.app.post(
            self.project.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.valid_embargo_payload,
            content_type='application/json',
            auth=self.user.auth
        )
        self.project.reload()
        assert_equal(res.status_code, 202)
        assert_equal(res.json['urls']['registrations'], self.project.web_url_for('node_registrations'))
        # Last node directly registered from self.project
        registration = Node.find(
            Q('registered_from', 'eq', self.project)
        ).sort('-registered_date')[0]
        assert_true(registration.is_registration)
        assert_false(registration.is_public)
        assert_true(registration.is_pending_embargo_for_existing_registration)
        assert_is_not_none(registration.embargo)
        for node in registration.get_descendants_recursive():
            assert_true(node.is_registration)
            assert_false(node.is_public)
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    def test_POST_invalid_embargo_end_date_returns_HTTPBad_Request(self, mock_enqueue):
        res = self.app.post(
            self.project.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.invalid_embargo_date_payload,
            content_type='application/json',
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 400)
    @mock.patch('framework.celery_tasks.handlers.enqueue_task')
    def test_valid_POST_embargo_adds_to_parent_projects_log(self, mock_enqueue):
        initial_project_logs = len(self.project.logs)
        self.app.post(
            self.project.api_url_for('register_draft_registration', draft_id=self.draft._id),
            self.valid_embargo_payload,
            content_type='application/json',
            auth=self.user.auth
        )
        self.project.reload()
        # Only the 'embargo initiated' log lands on the parent project;
        # created/registered logs belong to the registration itself.
        assert_equal(len(self.project.logs), initial_project_logs + 1)
    @mock.patch('website.project.sanctions.TokenApprovableSanction.ask')
    def test_embargoed_registration_set_privacy_requests_embargo_termination(self, mock_ask):
        # Initiate and approve embargo
        for i in range(3):
            c = AuthUserFactory()
            self.registration.add_contributor(c, [permissions.ADMIN], auth=Auth(self.user))
        self.registration.save()
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        for user_id, embargo_tokens in self.registration.embargo.approval_state.iteritems():
            approval_token = embargo_tokens['approval_token']
            self.registration.embargo.approve_embargo(User.load(user_id), approval_token)
        self.registration.save()
        res = self.app.post(
            self.registration.api_url_for('project_set_privacy', permissions='public'),
            auth=self.user.auth,
        )
        assert_equal(res.status_code, 200)
        for reg in self.registration.node_and_primary_descendants():
            reg.reload()
            assert_false(reg.is_public)
            assert_true(reg.embargo_termination_approval)
            assert_true(reg.embargo_termination_approval.is_pending_approval)
    def test_cannot_request_termination_on_component_of_embargo(self):
        node = ProjectFactory()
        child = ProjectFactory(parent=node, creator=node.creator)
        with utils.mock_archive(node, embargo=True, autocomplete=True, autoapprove=True) as reg:
            with assert_raises(NodeStateError):
                reg.nodes[0].request_embargo_termination(Auth(node.creator))
    @mock.patch('website.mails.send_mail')
    def test_embargoed_registration_set_privacy_sends_mail(self, mock_send_mail):
        """
        Integration test for https://github.com/CenterForOpenScience/osf.io/pull/5294#issuecomment-212613668
        """
        # Initiate and approve embargo
        for i in range(3):
            c = AuthUserFactory()
            self.registration.add_contributor(c, [permissions.ADMIN], auth=Auth(self.user))
        self.registration.save()
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        for user_id, embargo_tokens in self.registration.embargo.approval_state.iteritems():
            approval_token = embargo_tokens['approval_token']
            self.registration.embargo.approve_embargo(User.load(user_id), approval_token)
        self.registration.save()
        res = self.app.post(
            self.registration.api_url_for('project_set_privacy', permissions='public'),
            auth=self.user.auth,
        )
        assert_equal(res.status_code, 200)
        for admin in self.registration.admin_contributors:
            assert_true(any([c[0][0] == admin.username for c in mock_send_mail.call_args_list]))
    @mock.patch('website.project.sanctions.TokenApprovableSanction.ask')
    def test_make_child_embargoed_registration_public_asks_all_admins_in_tree(self, mock_ask):
        # Initiate and approve embargo
        node = NodeFactory(creator=self.user)
        c1 = AuthUserFactory()
        child = NodeFactory(parent=node, creator=c1)
        c2 = AuthUserFactory()
        NodeFactory(parent=child, creator=c2)
        registration = RegistrationFactory(project=node)
        registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        for user_id, embargo_tokens in registration.embargo.approval_state.iteritems():
            approval_token = embargo_tokens['approval_token']
            registration.embargo.approve_embargo(User.load(user_id), approval_token)
        # BUG FIX: previously saved self.registration (from setUp), which the
        # approvals above never touched; save the registration under test.
        registration.save()
        res = self.app.post(
            registration.api_url_for('project_set_privacy', permissions='public'),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 200)
        asked_admins = [(admin._id, n._id) for admin, n in mock_ask.call_args[0][0]]
        for admin, node in registration.get_admin_contributors_recursive():
            assert_in((admin._id, node._id), asked_admins)
    def test_non_contributor_GET_approval_returns_HTTPError(self):
        non_contributor = AuthUserFactory()
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        approval_token = self.registration.embargo.approval_state[self.user._id]['approval_token']
        approval_url = self.registration.web_url_for('view_project', token=approval_token)
        res = self.app.get(approval_url, auth=non_contributor.auth, expect_errors=True)
        assert_equal(http.FORBIDDEN, res.status_code)
        assert_true(self.registration.is_pending_embargo)
        assert_true(self.registration.embargo_end_date)
    def test_non_contributor_GET_disapproval_returns_HTTPError(self):
        non_contributor = AuthUserFactory()
        self.registration.embargo_registration(
            self.user,
            datetime.datetime.utcnow() + datetime.timedelta(days=10)
        )
        self.registration.save()
        assert_true(self.registration.is_pending_embargo)
        rejection_token = self.registration.embargo.approval_state[self.user._id]['rejection_token']
        approval_url = self.registration.web_url_for('view_project', token=rejection_token)
        res = self.app.get(approval_url, auth=non_contributor.auth, expect_errors=True)
        assert_equal(http.FORBIDDEN, res.status_code)
        assert_true(self.registration.is_pending_embargo)
        assert_true(self.registration.embargo_end_date)
# ---------------------------------------------------------------------------
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
from oslo.config import cfg
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt import virtapi
CONF = cfg.CONF  # global oslo.config handle for this module
CONF.import_opt('host', 'nova.netconf')  # make sure the 'host' option is registered
LOG = logging.getLogger(__name__)
# Module-level list of fake node names served by FakeDriver.
# Managed via set_nodes()/restore_nodes(); FakeDriver.__init__ lazily
# initialises it to [CONF.host] when still None/empty.
_FAKE_NODES = None
def set_nodes(nodes):
    """Sets FakeDriver's node list (the module-global _FAKE_NODES).

    It has effect on the following methods:
        get_available_nodes()
        get_available_resource
        get_host_stats()

    To restore the change, call restore_nodes()

    :param nodes: list of node (hostname) strings the fake driver should report
    """
    global _FAKE_NODES
    _FAKE_NODES = nodes
def restore_nodes():
    """Resets FakeDriver's node list modified by set_nodes().

    Usually called from tearDown(). Restores the default single-node list
    containing only this host (CONF.host).
    """
    global _FAKE_NODES
    _FAKE_NODES = [CONF.host]
class FakeInstance(object):
    """Minimal stand-in for a hypervisor instance record.

    Holds just a name and a power state, and supports dict-style access
    (``inst['name']``) by delegating item lookup to attribute lookup.
    """

    def __init__(self, name, state):
        self.name, self.state = name, state

    def __getitem__(self, key):
        # Allow callers that expect a mapping to read attributes by key.
        return getattr(self, key)
class FakeDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
"""Fake hypervisor driver."""
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.host_status_base = {
'vcpus': 100000,
'memory_mb': 8000000000,
'local_gb': 600000000000,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 100000000000,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
'hypervisor_hostname': CONF.host,
'cpu_info': {},
'disk_available_least': 500000000000,
}
self._mounts = {}
self._interfaces = {}
if not _FAKE_NODES:
set_nodes([CONF.host])
def init_host(self, host):
return
def list_instances(self):
return self.instances.keys()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
pass
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
pass
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
name = instance['name']
state = power_state.RUNNING
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
def live_snapshot(self, context, instance, name, update_task_state):
if instance['name'] not in self.instances:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def snapshot(self, context, instance, name, update_task_state):
if instance['name'] not in self.instances:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
pass
@staticmethod
def get_host_ip_addr():
return '192.168.0.1'
def set_admin_password(self, instance, new_pass):
pass
def inject_file(self, instance, b64_path, b64_contents):
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
pass
def unrescue(self, instance, network_info):
pass
def poll_rebooting_instances(self, timeout, instances):
pass
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
pass
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
pass
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
pass
def power_off(self, instance):
pass
def power_on(self, context, instance, network_info, block_device_info):
pass
def soft_delete(self, instance):
pass
def restore(self, instance):
pass
def pause(self, instance):
pass
def unpause(self, instance):
pass
def suspend(self, instance):
pass
def resume(self, instance, network_info, block_device_info=None):
pass
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True, context=None):
key = instance['name']
if key in self.instances:
del self.instances[key]
else:
LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
{'key': key,
'inst': self.instances}, instance=instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
"""Attach the disk to the instance at mountpoint using info."""
instance_name = instance['name']
if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
return True
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
try:
del self._mounts[instance['name']][mountpoint]
except KeyError:
pass
return True
def swap_volume(self, old_connection_info, new_connection_info,
instance, mountpoint):
"""Replace the disk attached to the instance."""
instance_name = instance['name']
if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = new_connection_info
return True
def attach_interface(self, instance, image_meta, vif):
if vif['id'] in self._interfaces:
raise exception.InterfaceAttachFailed('duplicate')
self._interfaces[vif['id']] = vif
def detach_interface(self, instance, vif):
try:
del self._interfaces[vif['id']]
except KeyError:
raise exception.InterfaceDetachFailed('not attached')
def get_info(self, instance):
if instance['name'] not in self.instances:
raise exception.InstanceNotFound(instance_id=instance['name'])
i = self.instances[instance['name']]
return {'state': i.state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
def get_diagnostics(self, instance_name):
return {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
bw = []
return bw
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
volusage = []
return volusage
def block_stats(self, instance_name, disk_id):
return [0L, 0L, 0L, 0L, None]
def interface_stats(self, instance_name, iface_id):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
def get_vnc_console(self, instance):
return {'internal_access_path': 'FAKE',
'host': 'fakevncconsole.com',
'port': 6969}
def get_spice_console(self, instance):
return {'internal_access_path': 'FAKE',
'host': 'fakespiceconsole.com',
'port': 6969,
'tlsPort': 6970}
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
return True
def refresh_security_group_members(self, security_group_id):
return True
def refresh_instance_security_rules(self, instance):
return True
def refresh_provider_fw_rules(self):
pass
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
if nodename not in _FAKE_NODES:
return {}
dic = {'vcpus': 1,
'memory_mb': 8192,
'local_gb': 1028,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
'hypervisor_hostname': nodename,
'disk_available_least': 0,
'cpu_info': '?'}
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
return
def get_instance_disk_info(self, instance_name):
return
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
post_method(context, instance_ref, dest, block_migration,
migrate_data)
return
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
return
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return {}
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
return
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
return
def confirm_migration(self, migration, instance, network_info):
return
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, disk, migrate_data=None):
return
def unfilter_instance(self, instance_ref, network_info):
return
def test_remove_vm(self, instance_name):
"""Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
    """Return fake Host Status of ram, disk, network.

    Yields one status dict per fake node: a single node returns the
    dict itself, several nodes return a list of dicts, and zero nodes
    raise NovaException.
    """
    stats = []
    for node in _FAKE_NODES:
        status = self.host_status_base.copy()
        status['hypervisor_hostname'] = node
        status['host_hostname'] = node
        status['host_name_label'] = node
        stats.append(status)
    if not stats:
        raise exception.NovaException("FakeDriver has no node")
    return stats[0] if len(stats) == 1 else stats
def host_power_action(self, host, action):
    """Reboots, shuts down or powers up the host."""
    # Fake driver: echo the requested action as the result.
    return action
def host_maintenance_mode(self, host, mode):
    """Start/Stop host maintenance window. On start, it triggers
    guest VMs evacuation.

    The fake driver only echoes the resulting maintenance state.
    """
    return 'on_maintenance' if mode else 'off_maintenance'
def set_host_enabled(self, host, enabled):
    """Sets the specified host's ability to accept new instances."""
    return 'enabled' if enabled else 'disabled'
def get_disk_available_least(self):
    # Not meaningful without a real hypervisor; returns None.
    pass
def get_volume_connector(self, instance):
    # Static connector info, sufficient for volume-attach testing.
    return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
def get_available_nodes(self, refresh=False):
    # All fake nodes are always available; `refresh` is ignored.
    return _FAKE_NODES
def instance_on_disk(self, instance):
    # Fake instances never leave anything on shared storage.
    return False
def list_instance_uuids(self):
    # UUID listing is not tracked by the fake driver.
    return []
class FakeVirtAPI(virtapi.VirtAPI):
    """VirtAPI implementation that forwards every call straight to the
    nova ``db`` layer (no caching, no conductor indirection)."""

    def instance_update(self, context, instance_uuid, updates):
        return db.instance_update_and_get_original(context,
                                                   instance_uuid,
                                                   updates)

    def security_group_get_by_instance(self, context, instance):
        return db.security_group_get_by_instance(context, instance['uuid'])

    def security_group_rule_get_by_security_group(self, context,
                                                  security_group):
        return db.security_group_rule_get_by_security_group(
            context, security_group['id'])

    def provider_fw_rule_get_all(self, context):
        return db.provider_fw_rule_get_all(context)

    def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
        return db.agent_build_get_by_triple(context,
                                            hypervisor, os, architecture)

    def instance_type_get(self, context, instance_type_id):
        return db.instance_type_get(context, instance_type_id)

    def block_device_mapping_get_all_by_instance(self, context, instance,
                                                 legacy=True):
        # Optionally convert new-style BDMs back to the legacy format.
        bdms = db.block_device_mapping_get_all_by_instance(context,
                                                           instance['uuid'])
        if legacy:
            bdms = block_device.legacy_mapping(bdms)
        return bdms

    def block_device_mapping_update(self, context, bdm_id, values):
        return db.block_device_mapping_update(context, bdm_id, values)
| |
#!/usr/bin/env python
# coding=utf-8
"""
Name : time_event.py
Author : David M. Freestone
Date : 03.22.2016
This time_event module is covered under the modified BSD license
Copyright (c) 2016, David M. Freestone
All rights reserved.
"""
import pandas as pd
import numpy as np
from glob import glob
from os.path import basename, join, splitext, expanduser
from datetime import datetime
from shutil import move
from numpy import zeros
from numba import jit, float64, int64
# TODO(David): Do we want a notion of "settings" for different file types?
# If so, need a way to get/return the settings for a particular file type
# and a way to update these settings (see file_formats)
# TODO(David): What kind of file_format specific stuff should we have?
def load_file(f, names=None, settings=None):
    """Load a single file into a DataFrame.

    Column names come from *names*, or are derived from *settings*
    when only the latter is given.  Every row is tagged with the
    source file's basename in a ``filename`` column.
    """
    if names is None and settings is not None:
        names = names_from_settings(settings)
    frame = pd.read_csv(f, engine="c", header=0, index_col=False,
                        names=names)
    frame["filename"] = basename(f)
    return frame
def load_files(files, names=None):
    """Load a list of files into a single concatenated DataFrame."""
    frames = [load_file(f, names=names) for f in files]
    return pd.concat(frames)
def load_directory(path, ext, names=None):
    """Load every file under *path* matching extension *ext*."""
    # Accept both ".csv" and "csv" style extensions.
    pattern = "*" + ext if "." in ext else "*." + ext
    matched = glob(join(path, pattern))
    return load_files(matched, names)
# @jit(nopython=True)
def cumulative_changes(x):
    # Running count of value transitions in x (the sequence is treated
    # as preceded by a zero, so a nonzero first element counts).
    return changes(x).cumsum()
# @jit(nopython=True)
def changes(x):
    """Boolean array marking positions where x differs from its
    predecessor; the sequence is treated as preceded by a zero."""
    padded = np.hstack((0, x))
    return padded[1:] != padded[:-1]
@jit(float64[:, :](float64[:], float64, float64, int64, float64), nopython=True)
def mpc_to_tec(rawdata, resolution=0.005, event_tol=0.0001,
               max_event=62, max_time=36000):
    """Convert mpc format (time.event) to [time, event]

    Each MedPC datum packs an event code into the fractional part of a
    tick count: ``floor(datum)`` is the time in ticks (one tick =
    ``resolution`` seconds) and the first three fractional digits are
    the event id.  Returns an (nrows, 3) array of
    [valid_flag, time_seconds, event_id]; rejected rows keep zeros.

    Note: This version is 130x faster than the previous pandas version
    """
    # TODO(David): Write this
    nrows = rawdata.shape[0]
    time_event = np.zeros((nrows, 3))
    max_previous_time = 0
    for idx, datum in enumerate(rawdata):
        time = np.floor(datum)
        # Recover the 3-digit event code from the fractional part.
        event = 1000 * (datum - time)
        if (event <= 0) or (event > max_event):
            continue
        # Convert ticks to seconds.
        time *= resolution
        # TODO(David): Implement sorting?
        # Maybe if this is fast enough, just do a groupby?
        # Then we can uncomment the line below
        if (time < 0) or (time > max_time): # or (time < max_previous_time):
            continue
        max_previous_time = time
        # The event has to be close enough to an integer to be considered real
        if abs(event-np.round(event)) >= event_tol:
            continue
        # Column 0 flags the row as valid.
        time_event[idx, 0] = True
        time_event[idx, 1] = time
        time_event[idx, 2] = np.round(event)
    return time_event
@jit(int64[:](int64[:], int64[:]), nopython=True)
def trialdef(events, pattern):
    """Return the trial number

    Searches for 'pattern' using a simple Finite State Machine
    and returns the trial number (or 0) for every event

    NOTE(review): negative entries in *pattern* appear to denote
    "abort" event codes — while waiting for the next pattern element,
    seeing one of them restarts the scan at the trial's start index.
    Confirm against the experiment definitions before relying on this.
    """
    if (pattern[0] < 0) or (pattern[-1] < 0):
        raise ValueError("Must start and end with an event")
    event_index = 0
    event_count = events.size - 1
    pattern_index = 0
    pattern_count = pattern.size - 1
    # remove_code holds the abort codes accumulated from negative
    # pattern entries; remove_index is the last valid slot (-1 = none).
    remove_index = -1
    remove_code = zeros(1 + pattern_count)
    trial_number = 0
    trial = zeros(1 + event_count, dtype=np.int64)
    start_index = 0
    while event_index <= event_count:
        if ((pattern_index == 0) and
            (events[event_index] == pattern[0])):
            # Start of a candidate trial.
            pattern_index += 1
            remove_index = -1
            start_index = event_index
        elif ((pattern_index == pattern_count) and
              (events[event_index] == pattern[pattern_count])):
            # Final pattern element seen: label the whole span.
            pattern_index = 0
            remove_index = -1
            trial_number += 1
            trial[start_index:1 + event_index] = trial_number
        elif events[event_index] == pattern[pattern_index]:
            # Expected next element: advance the FSM.
            pattern_index += 1
            remove_index = -1
        elif (pattern_index > 0) and (events[event_index] == pattern[0]):
            # Saw a fresh trial-start event: reset and re-process it.
            pattern_index = 0
            event_index -= 1
        elif remove_index > 0:
            # An abort code invalidates the partial match: rewind.
            for i in range(1 + remove_index):
                if events[event_index] == remove_code[i]:
                    event_index = start_index
                    break
        if pattern[pattern_index] < 0:
            # Collect consecutive negative entries as abort codes.
            while pattern[pattern_index] < 0:
                remove_index += 1
                remove_code[remove_index] = abs(pattern[pattern_index])
                pattern_index += 1
        event_index += 1
    return trial
def habitest_load_directory(path, ext):
    """Load a directory of Habitest files with their canonical columns."""
    columns = ["time", "event_type", "event_id",
               "event", "register", "comment"]
    return load_directory(path, ext, names=columns)
def isChurchMPCFile(f):
    """Return True when *f* looks like a Church-lab MedPC file name,
    i.e. the text after the first '.' parses as a number.

    Bug fix: the bare ``except`` previously swallowed every exception
    (including KeyboardInterrupt); only the two expected failure modes
    are caught now.
    """
    try:
        float(f.split(".")[1])
    except (IndexError, ValueError):
        # No dot in the name at all, or a non-numeric extension.
        return False
    return True
def mpc_load_directory(path):
    """Load all Church MPC files under *path* and decode time/event."""
    candidates = glob(join(path, "*"))
    mpc_files = [f for f in candidates if isChurchMPCFile(f)]
    raw = load_files(mpc_files, names=["rawdata"])
    return pandas_mpc_to_tec(raw)
# TODO(David): this function is horribly slow
def pandas_mpc_subject_id(filenames):
    """Subject id: last four digits of the part before the first dot."""
    stems = filenames.str.split(".").str[0]
    return stems.str[-4:].astype(int)
# TODO(David): this function is horribly slow
def pandas_mpc_session_number(filenames):
    """Return the session number given a Series of file names

    (this is probably the fastest way to do it)
    """
    extensions = filenames.str.split(".").str[1]
    return extensions.astype(int)
def pandas_mpc_to_tec(df, resolution=0.005, event_tol=0.0001,
                      max_event=62):
    """Return time and event columns in a DataFrame
    (and drop the rawdata column)

    Rows flagged invalid by ``mpc_to_tec`` are discarded.
    """
    # Bug fixes: .as_matrix() was removed in pandas 1.0 — use
    # .to_numpy(); and the keyword parameters were accepted but
    # silently ignored — forward them to mpc_to_tec (defaults match).
    tec = mpc_to_tec(df.rawdata.to_numpy(), resolution, event_tol,
                     max_event)
    df = df.drop("rawdata", axis=1)
    df["time"] = tec[:, 1]
    df["event"] = tec[:, 2]
    # Column 0 of tec is the validity flag set by mpc_to_tec.
    return df[tec[:, 0] > 0]
def isBadSubjectId(f):
    """True when the 4-digit subject id before the first dot exceeds 8000."""
    stem = f.split(".")[0]
    return int(stem[-4:]) > 8000
def remove_trailing_zero(f):
    """Drop the last character of the file stem, keeping the extension."""
    stem, extension = splitext(f)
    return "{}{}".format(stem[:-1], extension)
def rename_bad_mpc_files(fname):
    """Rename a Church MPC file whose subject id carries a spurious
    trailing zero (e.g. 18050 -> 1805); return the resulting name.

    Bug fix: previously the function implicitly returned None for
    files needing no rename, so the list built by
    rename_bad_mpc_files_in_path was littered with Nones.
    """
    if isChurchMPCFile(fname) and isBadSubjectId(fname):
        new_fname = remove_trailing_zero(fname)
        move(fname, new_fname)
        return new_fname
    return fname
def rename_bad_mpc_files_in_path(path):
    """Written to turn say, 18050 to 1805"""
    return [rename_bad_mpc_files(f) for f in glob(join(path, "*"))]
def isBadSessionId(f):
    """True when the numeric extension is suspiciously large (> 0.2),
    i.e. the session id lost its leading zero (0.41 instead of 0.041).

    Bug fix: the bare ``except`` is narrowed to ValueError, the only
    expected failure (missing or non-numeric extension).
    """
    try:
        return float(splitext(f)[1]) > 0.2
    except ValueError:
        return False
def rename_bad_mpc_sessions(fname):
    """Fix a session extension missing its leading zero
    (e.g. foo.41 -> foo.041); return the resulting file name.

    Bug fix: previously returned None for files that needed no rename,
    poisoning the list built by rename_bad_mpc_sessions_in_path.
    """
    if not isBadSessionId(fname):
        return fname
    stem, ext = splitext(fname)
    # Dividing the numeric extension by 10 restores the leading zero;
    # keep only the digits after the decimal point.
    fixed = str(round(float(ext) / 10, 3))
    new_fname = stem + "." + fixed.split(".")[1]
    move(fname, new_fname)
    return new_fname
def rename_bad_mpc_sessions_in_path(path):
    """Written to turn 0.41 into 0.041"""
    return [rename_bad_mpc_sessions(f) for f in glob(join(path, "*"))]
"""
def remove_exit_codes(df):
return df[df.event_type != "Exit"]
def remove_input_event(df, event):
return df[(df.event != event+"On")
& (df.event != event+"Off")]
# TODO(David): What's the best way to integrate low level commands
# with high level stuff that knows how to work on dataframes?
# Should it be something like the seaborn or statsmodels API
# Where we have to give it the columns? The worry is that it
# then just becomes unnecessary to use these functions?
# Unless these literally just becomes a file of useful functions?
def session_timestamp(f):
# Habitest
name = f.split("_")
subject = int(name[0])
session =datetime(*list(map(int, name[1:4]))).timestamp()
return subject, session
def eventnames_from_ids(ids, names):
# TODO(David): Write this
pass
def ids_from_eventnames(names, ids):
# TODO(David): Write this
pass
def subset(df):
# TODO(David): Write this
pass
def relative_time(time, events):
# TODO(David): Write this
pass
def cumulative_trial(trial):
# TODO(David): Write this
pass
def trial_def(events, pattern):
# TODO(David): Write this
pass
"""
| |
# Copyright 2013 - Mirantis, Inc.
# Copyright 2016 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import functools
import logging
import os

from oslo_utils import importutils
import requests
AUTH_TOKEN = 'auth_token'
SESSION = 'session'
CACERT = 'cacert'
CERT_FILE = 'cert'
CERT_KEY = 'key'
INSECURE = 'insecure'
PROJECT_ID = 'project_id'
USER_ID = 'user_id'
REGION_NAME = 'region_name'
TARGET_AUTH_TOKEN = 'target_auth_token'
TARGET_SESSION = 'target_session'
TARGET_AUTH_URI = 'target_auth_url'
TARGET_PROJECT_ID = 'target_project_id'
TARGET_USER_ID = 'target_user_id'
TARGET_INSECURE = 'target_insecure'
TARGET_SERVICE_CATALOG = 'target_service_catalog'
TARGET_REGION_NAME = 'target_region_name'
TARGET_USER_DOMAIN_NAME = 'target_user_domain_name'
TARGET_PROJECT_DOMAIN_NAME = 'target_project_domain_name'
osprofiler_web = importutils.try_import("osprofiler.web")
LOG = logging.getLogger(__name__)
def log_request(func):
    """Decorator logging method, URL and status of each HTTP response.

    Fix: apply functools.wraps so the decorated get/post/put/delete
    methods keep their names and docstrings for introspection.
    """
    @functools.wraps(func)
    def decorator(self, *args, **kwargs):
        resp = func(self, *args, **kwargs)
        LOG.debug("HTTP %s %s %d", resp.request.method, resp.url,
                  resp.status_code)
        return resp
    return decorator
class HTTPClient(object):
    """HTTP client for the Mistral API.

    Wraps a ``requests`` session, injecting authentication headers,
    "target" cloud headers (for cross-cloud workflows) and SSL options
    into every GET/POST/PUT/DELETE call.
    """

    def __init__(self, base_url, **kwargs):
        self.base_url = base_url
        # Consistency fix: use the SESSION constant declared above
        # rather than the raw 'session' string literal.
        self.session = kwargs.get(SESSION)

        if not self.session:
            self.session = requests.Session()

        self.auth_token = kwargs.get(AUTH_TOKEN)
        self.project_id = kwargs.get(PROJECT_ID)
        self.user_id = kwargs.get(USER_ID)
        self.cacert = kwargs.get(CACERT)
        self.insecure = kwargs.get(INSECURE, False)
        self.region_name = kwargs.get(REGION_NAME)
        self.ssl_options = {}

        # Credentials for the "target" cloud.
        self.target_session = kwargs.get(TARGET_SESSION)
        self.target_auth_token = kwargs.get(TARGET_AUTH_TOKEN)
        self.target_auth_uri = kwargs.get(TARGET_AUTH_URI)
        self.target_user_id = kwargs.get(TARGET_USER_ID)
        self.target_project_id = kwargs.get(TARGET_PROJECT_ID)
        self.target_service_catalog = kwargs.get(TARGET_SERVICE_CATALOG)
        self.target_region_name = kwargs.get(TARGET_REGION_NAME)
        self.target_insecure = kwargs.get(TARGET_INSECURE)
        self.target_user_domain_name = kwargs.get(TARGET_USER_DOMAIN_NAME)
        self.target_project_domain_name = kwargs.get(
            TARGET_PROJECT_DOMAIN_NAME
        )

        if self.base_url.startswith('https'):
            if self.cacert and not os.path.exists(self.cacert):
                raise ValueError('Unable to locate cacert file '
                                 'at %s.' % self.cacert)

            if self.cacert and self.insecure:
                LOG.warning('Client is set to not verify even though '
                            'cacert is provided.')

            # 'verify' follows the requests convention: False disables
            # certificate checks, a path points to a CA bundle.
            if self.insecure:
                self.ssl_options['verify'] = False
            else:
                if self.cacert:
                    self.ssl_options['verify'] = self.cacert
                else:
                    self.ssl_options['verify'] = True

            self.ssl_options['cert'] = (
                kwargs.get(CERT_FILE),
                kwargs.get(CERT_KEY)
            )

    @log_request
    def get(self, url, headers=None):
        """HTTP GET against base_url + url."""
        options = self._get_request_options('get', headers)

        return self.session.get(self.base_url + url, **options)

    @log_request
    def post(self, url, body, headers=None):
        """HTTP POST with *body* as the request payload."""
        options = self._get_request_options('post', headers)

        return self.session.post(self.base_url + url, data=body, **options)

    @log_request
    def put(self, url, body, headers=None):
        """HTTP PUT with *body* as the request payload."""
        options = self._get_request_options('put', headers)

        return self.session.put(self.base_url + url, data=body, **options)

    @log_request
    def delete(self, url, headers=None):
        """HTTP DELETE against base_url + url."""
        options = self._get_request_options('delete', headers)

        return self.session.delete(self.base_url + url, **options)

    def _get_request_options(self, method, headers):
        """Build request kwargs: merged headers plus SSL options."""
        headers = self._update_headers(headers)

        if method in ['post', 'put']:
            content_type = headers.get('content-type', 'application/json')
            headers['content-type'] = content_type

        options = copy.deepcopy(self.ssl_options)
        options['headers'] = headers

        return options

    def _update_headers(self, headers):
        """Inject identity/target headers, keeping caller-supplied ones."""
        if not headers:
            headers = {}

        # Only a plain requests.Session needs explicit auth headers;
        # presumably a keystoneauth session injects them itself — verify.
        if isinstance(self.session, requests.Session):
            if self.auth_token:
                headers['X-Auth-Token'] = self.auth_token

            if self.project_id:
                headers['X-Project-Id'] = self.project_id

            if self.user_id:
                headers['X-User-Id'] = self.user_id

        if self.region_name:
            headers['X-Region-Name'] = self.region_name

        if self.target_auth_token:
            headers['X-Target-Auth-Token'] = self.target_auth_token

        if self.target_auth_uri:
            headers['X-Target-Auth-Uri'] = self.target_auth_uri

        if self.target_project_id:
            headers['X-Target-Project-Id'] = self.target_project_id

        if self.target_user_id:
            headers['X-Target-User-Id'] = self.target_user_id

        if self.target_insecure:
            # Note(akovi): due to changes in requests, this parameter
            # must be a string. Basically, it is a truthy value on
            # the server side.
            headers['X-Target-Insecure'] = str(self.target_insecure)

        if self.target_region_name:
            headers['X-Target-Region-Name'] = self.target_region_name

        if self.target_user_domain_name:
            headers['X-Target-User-Domain-Name'] = self.target_user_domain_name

        if self.target_project_domain_name:
            h_name = 'X-Target-Project-Domain-Name'
            headers[h_name] = self.target_project_domain_name

        if self.target_service_catalog:
            headers['X-Target-Service-Catalog'] = base64.b64encode(
                self.target_service_catalog.encode('utf-8')
            )

        if osprofiler_web:
            # Add headers for osprofiler.
            headers.update(osprofiler_web.get_trace_id_headers())

        return headers
| |
# -*- coding: utf-8 -*-
# File: transform.py
import numpy as np
import cv2
from ...utils.argtools import log_once
from .base import ImageAugmentor, _default_repr
TransformAugmentorBase = ImageAugmentor
"""
Legacy alias. Please don't use.
"""
# This legacy augmentor requires us to import base from here, causing circular dependency.
# Should remove this in the future.
__all__ = ["Transform", "ResizeTransform", "CropTransform", "FlipTransform",
"TransformList", "TransformFactory"]
# class WrappedImgFunc(object):
# def __init__(self, func, need_float=False, cast_back=True, fix_ndim=True):
# self.func = func
# self.need_float = need_float
# self.cast_back = cast_back
# def __call__(self, img):
# old_dtype = img.dtype
# old_ndim = img.ndim
# if self.need_float:
# img = img.astype("float32")
# img = self.func(img)
# if self.cast_back and old_dtype == np.uint8 and img.dtype != np.uint8:
# img = np.clip(img, 0, 255.)
# if self.cast_back:
# img = img.astype(old_dtype)
# if self.fix_ndim and old_ndim == 3 and img.ndim == 2:
# img = img[:, :, np.newaxis]
# return img
class BaseTransform(object):
    """
    Base class for all transforms, for type-check only.

    Users should never interact with this class.
    """
    def _init(self, params=None):
        # Copy constructor locals onto the instance, skipping 'self'
        # and private (underscore-prefixed) names.
        if not params:
            return
        public = {k: v for k, v in params.items()
                  if k != 'self' and not k.startswith('_')}
        for attr_name, attr_value in public.items():
            setattr(self, attr_name, attr_value)
class Transform(BaseTransform):
    """
    A deterministic image transformation, used to implement
    the (probably random) augmentors.

    This class is also the place to provide a default implementation to any
    :meth:`apply_xxx` method.
    The current default is to raise NotImplementedError in any such methods.

    All subclasses should implement `apply_image`.
    The image should be of type uint8 in range [0, 255], or
    floating point images in range [0, 1] or [0, 255]

    Some subclasses may implement `apply_coords`, when applicable.
    It should take and return a numpy array of Nx2, where each row is the (x, y) coordinate.

    The implementation of each method may choose to modify its input data
    in-place for efficient transformation.
    """

    def __init__(self):
        # provide an empty __init__, so that __repr__ will work nicely
        pass

    def __getattr__(self, name):
        # Unimplemented apply_* methods resolve to a callable that
        # raises, so subclasses only define the ones they support.
        if name.startswith("apply_"):
            def f(x):
                raise NotImplementedError("{} does not implement method {}".format(self.__class__.__name__, name))
            return f
        raise AttributeError("Transform object has no attribute {}".format(name))

    def __repr__(self):
        # _default_repr may raise AssertionError for attribute values it
        # cannot render; warn once and fall back to the default repr.
        try:
            return _default_repr(self)
        except AssertionError as e:
            log_once(e.args[0], 'warn')
            return super(Transform, self).__repr__()

    __str__ = __repr__
class ResizeTransform(Transform):
    """
    Resize the image.
    """

    def __init__(self, h, w, new_h, new_w, interp):
        """
        Args:
            h, w (int): original image size
            new_h, new_w (int): target size
            interp (int): cv2 interpolation method
        """
        super(ResizeTransform, self).__init__()
        self._init(locals())

    def apply_image(self, img):
        assert img.shape[:2] == (self.h, self.w)
        ret = cv2.resize(
            img, (self.new_w, self.new_h),
            interpolation=self.interp)
        # cv2.resize drops a trailing singleton channel dim; restore it.
        if img.ndim == 3 and ret.ndim == 2:
            ret = ret[:, :, np.newaxis]
        return ret

    def apply_coords(self, coords):
        # Scale x by the width ratio and y by the height ratio.
        coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
        coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
        return coords
class CropTransform(Transform):
    """
    Crop a subimage from an image.
    """
    def __init__(self, y0, x0, h, w):
        super(CropTransform, self).__init__()
        self._init(locals())

    def apply_image(self, img):
        # Slice out the [y0, y0+h) x [x0, x0+w) window.
        y_stop = self.y0 + self.h
        x_stop = self.x0 + self.w
        return img[self.y0:y_stop, self.x0:x_stop]

    def apply_coords(self, coords):
        # Shift coordinates into the crop's frame of reference.
        coords[:, 0] = coords[:, 0] - self.x0
        coords[:, 1] = coords[:, 1] - self.y0
        return coords
class WarpAffineTransform(Transform):
    """Apply an affine warp (2x3 matrix, cv2 convention) to images and coords."""

    def __init__(self, mat, dsize, interp=cv2.INTER_LINEAR,
                 borderMode=cv2.BORDER_CONSTANT, borderValue=0):
        # mat: 2x3 affine matrix as used by cv2.warpAffine;
        # dsize: (width, height) of the output image.
        super(WarpAffineTransform, self).__init__()
        self._init(locals())

    def apply_image(self, img):
        ret = cv2.warpAffine(img, self.mat, self.dsize,
                             flags=self.interp,
                             borderMode=self.borderMode,
                             borderValue=self.borderValue)
        # cv2 drops a trailing singleton channel dimension; restore it.
        if img.ndim == 3 and ret.ndim == 2:
            ret = ret[:, :, np.newaxis]
        return ret

    def apply_coords(self, coords):
        # Promote to homogeneous coordinates, then apply the affine map.
        coords = np.concatenate((coords, np.ones((coords.shape[0], 1), dtype='f4')), axis=1)
        coords = np.dot(coords, self.mat.T)
        return coords
class FlipTransform(Transform):
    """
    Flip the image.
    """
    def __init__(self, h, w, horiz=True):
        """
        Args:
            h, w (int): image size
            horiz (bool): whether to flip horizontally or vertically.
        """
        # Consistency fix: call the base constructor like every other
        # Transform subclass in this module does.
        super(FlipTransform, self).__init__()
        self._init(locals())

    def apply_image(self, img):
        if self.horiz:
            return img[:, ::-1]
        else:
            return img[::-1]

    def apply_coords(self, coords):
        # NOTE(review): maps x -> w - x (not w - 1 - x); assumes
        # coordinates live in a continuous [0, w]/[0, h] space — confirm.
        if self.horiz:
            coords[:, 0] = self.w - coords[:, 0]
        else:
            coords[:, 1] = self.h - coords[:, 1]
        return coords
class TransposeTransform(Transform):
    """
    Transpose the image.
    """
    def apply_image(self, img):
        ret = cv2.transpose(img)
        # cv2.transpose drops a trailing singleton channel; restore it.
        if img.ndim == 3 and ret.ndim == 2:
            ret = ret[:, :, np.newaxis]
        return ret

    def apply_coords(self, coords):
        # Transposing the image swaps the x and y coordinate columns.
        return coords[:, ::-1]
class NoOpTransform(Transform):
    """
    A Transform that does nothing.
    """
    def __getattr__(self, name):
        # Every apply_* method is the identity function.
        if not name.startswith("apply_"):
            raise AttributeError("NoOpTransform object has no attribute {}".format(name))

        def identity(x):
            return x
        return identity
class PhotometricTransform(Transform):
    """
    A transform which only has `apply_image` but does nothing in `apply_coords`.
    """
    def __init__(self, func, name=None):
        """
        Args:
            func (img -> img): a function to be used for :meth:`apply_image`
            name (str, optional): the name of this transform
        """
        self._func = func
        self._name = name

    def apply_image(self, img):
        # Delegate to the user-supplied pixel-level function.
        return self._func(img)

    def apply_coords(self, coords):
        # Photometric changes never move pixels.
        return coords

    def __repr__(self):
        label = self._name if self._name else ""
        return "imgaug.PhotometricTransform({})".format(label)

    __str__ = __repr__
class TransformFactory(Transform):
    """
    Create a :class:`Transform` from user-provided functions.
    """
    def __init__(self, name=None, **kwargs):
        """
        Args:
            name (str, optional): the name of this transform
            **kwargs: mapping from `'apply_xxx'` to implementation of such functions.
        """
        for key, impl in kwargs.items():
            if not key.startswith('apply_'):
                raise KeyError("Unknown argument '{}' in TransformFactory!".format(key))
            setattr(self, key, impl)
        self._name = name

    def __str__(self):
        return "imgaug.TransformFactory({})".format(self._name if self._name else "")

    __repr__ = __str__
"""
Some meta-transforms:
they do not perform actual transformation, but delegate to another Transform.
"""
class TransformList(BaseTransform):
    """
    Apply a list of transforms sequentially.
    """
    def __init__(self, tfms):
        """
        Args:
            tfms (list[Transform]):
        """
        for tfm in tfms:
            assert isinstance(tfm, BaseTransform), tfm
        self.tfms = tfms

    def _apply(self, x, meth):
        # Thread x through every member transform, in order.
        result = x
        for tfm in self.tfms:
            result = getattr(tfm, meth)(result)
        return result

    def __getattr__(self, name):
        if name.startswith("apply_"):
            return lambda x: self._apply(x, name)
        raise AttributeError("TransformList object has no attribute {}".format(name))

    def __str__(self):
        repr_each_tfm = ",\n".join([" " + repr(x) for x in self.tfms])
        return "imgaug.TransformList([\n{}])".format(repr_each_tfm)

    __repr__ = __str__

    def __add__(self, other):
        others = other.tfms if isinstance(other, TransformList) else [other]
        return TransformList(self.tfms + others)

    def __iadd__(self, other):
        others = other.tfms if isinstance(other, TransformList) else [other]
        self.tfms.extend(others)
        return self

    def __radd__(self, other):
        others = other.tfms if isinstance(other, TransformList) else [other]
        return TransformList(others + self.tfms)
class LazyTransform(BaseTransform):
    """
    A transform that's instantiated at the first call to `apply_image`.
    """
    def __init__(self, get_transform):
        """
        Args:
            get_transform (img -> Transform): a function which will be used to instantiate a Transform.
        """
        self.get_transform = get_transform
        self._transform = None

    def apply_image(self, img):
        # Instantiate on first use; compare against None explicitly
        # instead of by truthiness.
        if self._transform is None:
            self._transform = self.get_transform(img)
        return self._transform.apply_image(img)

    def _apply(self, x, meth):
        # Bug fix: the {} placeholder was never formatted with the
        # method name.
        assert self._transform is not None, \
            "LazyTransform.{} can only be called after the transform has been applied on an image!".format(meth)
        return getattr(self._transform, meth)(x)

    def __getattr__(self, name):
        if name.startswith("apply_"):
            return lambda x: self._apply(x, name)
        # Bug fix: the error message previously named TransformList.
        raise AttributeError("LazyTransform object has no attribute {}".format(name))

    def __repr__(self):
        if self._transform is None:
            return "LazyTransform(get_transform={})".format(str(self.get_transform))
        else:
            return repr(self._transform)

    __str__ = __repr__

    def apply_coords(self, coords):
        return self._apply(coords, "apply_coords")
if __name__ == '__main__':
    # Ad-hoc visual check: rotate an image and a set of random points
    # with the same affine transform and show them side by side.
    shape = (100, 100)
    center = (10, 70)
    mat = cv2.getRotationMatrix2D(center, 20, 1)
    trans = WarpAffineTransform(mat, (130, 130))

    def draw_points(img, pts):
        # Blacken each point that falls inside the image bounds.
        for p in pts:
            try:
                img[int(p[1]), int(p[0])] = 0
            except IndexError:
                pass

    image = cv2.imread('cat.jpg')
    image = cv2.resize(image, shape)
    orig_image = image.copy()
    coords = np.random.randint(100, size=(20, 2))

    draw_points(orig_image, coords)
    print(coords)
    # Apply the transform once to both the coordinates and the image.
    for _ in range(1):
        coords = trans.apply_coords(coords)
        image = trans.apply_image(image)
    print(coords)
    draw_points(image, coords)

    # viz = cv2.resize(viz, (1200, 600))
    orig_image = cv2.resize(orig_image, (600, 600))
    image = cv2.resize(image, (600, 600))
    viz = np.concatenate((orig_image, image), axis=1)
    cv2.imshow("mat", viz)
    cv2.waitKey()
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Document matcher for Search API stub.
DocumentMatcher provides an approximation of the Search API's query matching.
"""
import datetime
from google.appengine.datastore import document_pb
from google.appengine._internal.antlr3 import tree
from google.appengine.api.search import geo_util
from google.appengine.api.search import query_parser
from google.appengine.api.search import QueryParser
from google.appengine.api.search import search_util
from google.appengine.api.search.stub import simple_tokenizer
from google.appengine.api.search.stub import tokens
MSEC_PER_DAY = 86400000
INEQUALITY_COMPARISON_TYPES = [
QueryParser.GT,
QueryParser.GE,
QueryParser.LESSTHAN,
QueryParser.LE,
]
class ExpressionTreeException(Exception):
    """An error occurred while analyzing/translating the expression parse tree."""

    def __init__(self, msg):
        super(ExpressionTreeException, self).__init__(msg)
class DistanceMatcher(object):
    """A class to match on geo distance.

    Holds a reference geopoint and a query distance; IsMatch decides
    whether a document's closest geo field value satisfies a
    distance(...) comparison.
    """

    def __init__(self, geopoint, distance):
        self._geopoint = geopoint
        self._distance = distance

    def _CheckOp(self, op):
        """Reject comparison operators that make no sense for Geo fields."""
        if op == QueryParser.EQ or op == QueryParser.HAS:
            raise ExpressionTreeException('Equality comparison not available for Geo type')
        if op == QueryParser.NE:
            raise ExpressionTreeException('!= comparison operator is not available')
        if op not in (QueryParser.GT, QueryParser.GE, QueryParser.LESSTHAN, QueryParser.LE):
            raise search_util.UnsupportedOnDevError(
                'Operator %s not supported for distance matches on development server.'
                % str(op))

    def _IsDistanceMatch(self, distance, op):
        """Compare *distance* to the query distance under *op*."""
        if op == QueryParser.GT or op == QueryParser.GE:
            return distance >= self._distance
        if op == QueryParser.LESSTHAN or op == QueryParser.LE:
            return distance <= self._distance
        else:
            # Bug fix: the original used Python 2-only raise syntax
            # ("raise AssertionError, msg"), a SyntaxError on Python 3.
            raise AssertionError('unexpected op %s' % str(op))

    def IsMatch(self, field_values, op):
        """True if the closest geo field value satisfies the comparison."""
        self._CheckOp(op)
        if not field_values:
            return False

        return self._IsDistanceMatch(min([
            geo_util.LatLng(field_value.geo().lat(), field_value.geo().lng())
            - self._geopoint for field_value in field_values]), op)
class DocumentMatcher(object):
"""A class to match documents with a query."""
def __init__(self, query, inverted_index):
    # query: parsed query tree; inverted_index: token -> postings store.
    self._query = query
    self._inverted_index = inverted_index
    self._parser = simple_tokenizer.SimpleTokenizer()
def _PostingsForToken(self, token):
    """Returns the postings for the token."""
    # Thin wrapper over the index, kept for symmetry with
    # _PostingsForFieldToken.
    return self._inverted_index.GetPostingsForToken(token)
def _PostingsForFieldToken(self, field, value):
    """Returns postings for the value occurring in the given field."""
    # Normalize the same way the indexer does so lookups line up.
    value = simple_tokenizer.NormalizeString(value)
    return self._PostingsForToken(
        tokens.Token(chars=value, field_name=field))
def _MatchRawPhraseWithRawAtom(self, field_text, phrase_text):
    # Atom fields match as a single unit: tokenize both sides with the
    # ATOM rules and require exact token-sequence equality.
    tokenized_phrase = self._parser.TokenizeText(
        phrase_text, input_field_type=document_pb.FieldValue.ATOM)
    tokenized_field_text = self._parser.TokenizeText(
        field_text, input_field_type=document_pb.FieldValue.ATOM)
    return tokenized_phrase == tokenized_field_text
def _MatchPhrase(self, field, match, document):
    """Match a textual field with a phrase query node.

    Looks up the posting list of the phrase's first token for this
    document, then verifies the remaining tokens occur contiguously at
    one of the recorded positions.
    """
    raw_field_text = field.value().string_value()
    raw_phrase_text = query_parser.GetPhraseQueryNodeText(match)

    # Atom fields are compared as a whole rather than token-by-token.
    if field.value().type() == document_pb.FieldValue.ATOM:
        return self._MatchRawPhraseWithRawAtom(raw_field_text, raw_phrase_text)

    if not raw_phrase_text:
        return False

    if field.value().type() == document_pb.FieldValue.UNTOKENIZED_PREFIX:
        phrase = self._parser.Normalize(raw_phrase_text, field.value().type())
        field_text = self._parser.Normalize(raw_field_text, field.value().type())
        return field_text.startswith(phrase)

    phrase = self._parser.TokenizeText(raw_phrase_text)
    field_text = self._parser.TokenizeText(raw_field_text)
    if not phrase:
        return True

    # Find this document's posting entry for the first phrase token.
    posting = None
    for post in self._PostingsForFieldToken(field.name(), phrase[0].chars):
        if post.doc_id == document.id():
            posting = post
            break
    if not posting:
        return False

    def ExtractWords(token_list):
        return (token.chars for token in token_list)

    for position in posting.positions:
        # Bug fix: zip() returns an iterator on Python 3; materialize
        # it so len() works on both Python 2 and 3.
        match_words = list(zip(ExtractWords(field_text[position:]),
                               ExtractWords(phrase)))
        if len(match_words) != len(phrase):
            continue

        # Renamed from 'match' to avoid shadowing the query-node arg.
        matched = True
        for doc_word, match_word in match_words:
            if (field.value().type() == document_pb.FieldValue.TOKENIZED_PREFIX and
                    doc_word.startswith(match_word)):
                continue
            if doc_word != match_word:
                matched = False

        if matched:
            return True
    return False
def _MatchTextField(self, field, match, document):
    """Check if a textual field matches a query tree node."""

    # A fuzzy node is approximated by matching its underlying term.
    if match.getType() == QueryParser.FUZZY:
        return self._MatchTextField(field, match.getChild(0), document)

    if match.getType() == QueryParser.VALUE:
        if query_parser.IsPhrase(match):
            return self._MatchPhrase(field, match, document)

        # ATOM and UNTOKENIZED_PREFIX compare whole normalized strings.
        normalized_query = self._parser.Normalize(
            query_parser.GetQueryNodeText(match), field.value().type())
        normalized_text_field = self._parser.Normalize(
            field.value().string_value(), field.value().type())
        if field.value().type() == document_pb.FieldValue.ATOM:
            return normalized_query == normalized_text_field

        if field.value().type() == document_pb.FieldValue.UNTOKENIZED_PREFIX:
            return normalized_text_field.startswith(normalized_query)

        query_tokens = self._parser.TokenizeText(
            query_parser.GetQueryNodeText(match))

        # An empty query matches everything.
        if not query_tokens:
            return True

        # A multi-token value matches only if every token matches.
        if len(query_tokens) > 1:
            def QueryNode(token):
                token_text = self._parser.Normalize(token.chars, field.value().type())
                return query_parser.CreateQueryNode(token_text, QueryParser.TEXT)
            return all(self._MatchTextField(field, QueryNode(token), document)
                       for token in query_tokens)

        # Single token: consult the inverted index directly.
        token_text = self._parser.Normalize(query_tokens[0].chars,
                                            field.value().type())
        matching_docids = [
            post.doc_id for post in self._PostingsForFieldToken(
                field.name(), token_text)]
        return document.id() in matching_docids

    def ExtractGlobalEq(node):
        # Unwrap "field:value" nodes whose field is GLOBAL so the bare
        # value is matched against this field.
        op = node.getType()
        if ((op == QueryParser.EQ or op == QueryParser.HAS) and
            len(node.children) >= 2):
            if node.children[0].getType() == QueryParser.GLOBAL:
                return node.children[1]
        return node

    if match.getType() == QueryParser.CONJUNCTION:
        return all(self._MatchTextField(field, ExtractGlobalEq(child), document)
                   for child in match.children)

    if match.getType() == QueryParser.DISJUNCTION:
        return any(self._MatchTextField(field, ExtractGlobalEq(child), document)
                   for child in match.children)

    if match.getType() == QueryParser.NEGATION:
        raise ExpressionTreeException('Unable to compare \"' + field.name() +
                                      '\" with negation')

    # Unknown node type: treat as no match.
    return False
def _GetFieldName(self, field):
    """Get the field name of the given field node."""
    # Parse-tree nodes carry the name as node text; plain strings pass
    # through unchanged.
    if isinstance(field, tree.CommonTree):
        return query_parser.GetQueryNodeText(field)
    return field
def _IsValidDateValue(self, value):
"""Returns whether value is a valid date."""
try:
datetime.datetime.strptime(value, '%Y-%m-%d')
except ValueError:
return False
return True
def _IsValidNumericValue(self, value):
"""Returns whether value is a valid number."""
try:
float(value)
except ValueError:
return False
return True
def _CheckValidDateComparison(self, field_name, match):
"""Check if match is a valid date value."""
if match.getType() == QueryParser.FUNCTION:
name, _ = match.children
raise ExpressionTreeException('Unable to compare "%s" with "%s()"' %
(field_name, name))
elif match.getType() == QueryParser.VALUE:
match_val = query_parser.GetPhraseQueryNodeText(match)
if not self._IsValidDateValue(match_val):
raise ExpressionTreeException('Unable to compare "%s" with "%s"' %
(field_name, match_val))
  def _MatchDateField(self, field, match, operator, document):
    """Check if a date field matches a query tree node.

    An invalid date comparison (function call or non-date value on the right
    hand side) is treated as a non-match rather than propagated as an error.
    Date values are compared as whole days since the epoch via _DateStrToDays.
    """
    try:
      self._CheckValidDateComparison(field.name(), match)
    except ExpressionTreeException:
      return False
    return self._MatchComparableField(
        field, match, _DateStrToDays, operator, document)
  def _MatchNumericField(self, field, match, operator, document):
    """Check if a numeric field matches a query tree node.

    Delegates to _MatchComparableField, casting both sides with float().
    """
    return self._MatchComparableField(field, match, float, operator, document)
def _MatchGeoField(self, field, matcher, operator, document):
"""Check if a geo field matches a query tree node."""
if not isinstance(matcher, DistanceMatcher):
return False
field = self._GetFieldName(field)
values = [field.value() for field in
search_util.GetAllFieldInDocument(document, field) if
field.value().type() == document_pb.FieldValue.GEO]
return matcher.IsMatch(values, operator)
  def _MatchComparableField(
      self, field, match, cast_to_type, op, document):
    """A generic method to test matching for comparable types.
    Comparable types are defined to be anything that supports <, >, <=, >=, ==.
    For our purposes, this is numbers and dates.
    Args:
      field: The document_pb.Field to test
      match: The query node to match against
      cast_to_type: The type to cast the node string values to
      op: The query node type representing the type of comparison to perform
      document: The document that the field is in
    Returns:
      True iff the field matches the query.
    Raises:
      UnsupportedOnDevError: Raised when an unsupported operator is used, or
        when the query node is of the wrong type.
      ExpressionTreeException: Raised when a != inequality operator is used.
    """
    # Both sides are compared after casting (e.g. float or days-since-epoch).
    field_val = cast_to_type(field.value().string_value())
    if match.getType() == QueryParser.VALUE:
      try:
        match_val = cast_to_type(query_parser.GetPhraseQueryNodeText(match))
      except ValueError:
        # Right-hand side is not parseable as the comparable type: no match.
        return False
    else:
      # Only plain VALUE nodes can participate in a comparable comparison.
      return False
    if op == QueryParser.EQ or op == QueryParser.HAS:
      return field_val == match_val
    if op == QueryParser.NE:
      # != is deliberately unsupported by the search API.
      raise ExpressionTreeException('!= comparison operator is not available')
    if op == QueryParser.GT:
      return field_val > match_val
    if op == QueryParser.GE:
      return field_val >= match_val
    if op == QueryParser.LESSTHAN:
      return field_val < match_val
    if op == QueryParser.LE:
      return field_val <= match_val
    raise search_util.UnsupportedOnDevError(
        'Operator %s not supported for numerical fields on development server.'
        % match.getText())
def _MatchAnyField(self, field, match, operator, document):
"""Check if a field matches a query tree.
Args:
field: the name of the field, or a query node containing the field.
match: A query node to match the field with.
operator: The query node type corresponding to the type of match to
perform (eg QueryParser.EQ, QueryParser.GT, etc).
document: The document to match.
Raises:
ExpressionTreeException: when != operator is used or right hand side of
numeric inequality is not a numeric constant.
"""
fields = search_util.GetAllFieldInDocument(document,
self._GetFieldName(field))
return any(self._MatchField(f, match, operator, document) for f in fields)
  def _MatchField(self, field, match, operator, document):
    """Check if a field matches a query tree.

    Dispatches on the field's value type to the text, numeric, or date
    matcher.

    Args:
      field: a document_pb.Field instance to match.
      match: A query node to match the field with.
      operator: The a query node type corresponding to the type of match to
        perform (eg QueryParser.EQ, QueryParser.GT, etc).
      document: The document to match.
    """
    if field.value().type() in search_util.TEXT_DOCUMENT_FIELD_TYPES:
      # Text fields only support equality/containment matches.
      if operator != QueryParser.EQ and operator != QueryParser.HAS:
        return False
      return self._MatchTextField(field, match, document)
    if field.value().type() in search_util.NUMBER_DOCUMENT_FIELD_TYPES:
      return self._MatchNumericField(field, match, operator, document)
    if field.value().type() == document_pb.FieldValue.DATE:
      return self._MatchDateField(field, match, operator, document)
    if field.value().type() == document_pb.FieldValue.GEO:
      # Geo fields never match directly; geo matching happens through the
      # distance() function path (_MatchFunction -> _MatchGeoField).
      return False
    type_name = document_pb.FieldValue.ContentType_Name(
        field.value().type()).lower()
    raise search_util.UnsupportedOnDevError(
        'Matching fields of type %s is unsupported on dev server (searched for '
        'field %s)' % (type_name, field.name()))
def _MatchGlobal(self, match, document):
for field in document.field_list():
if (field.value().type() == document_pb.FieldValue.UNTOKENIZED_PREFIX or
field.value().type() == document_pb.FieldValue.TOKENIZED_PREFIX):
continue
try:
if self._MatchAnyField(field.name(), match, QueryParser.EQ, document):
return True
except search_util.UnsupportedOnDevError:
pass
return False
def _ResolveDistanceArg(self, node):
if node.getType() == QueryParser.VALUE:
return query_parser.GetQueryNodeText(node)
if node.getType() == QueryParser.FUNCTION:
name, args = node.children
if name.getText() == 'geopoint':
lat, lng = (float(query_parser.GetQueryNodeText(v)) for v in args.children)
return geo_util.LatLng(lat, lng)
return None
  def _MatchFunction(self, node, match, operator, document):
    """Match a function call node; only distance(field, geopoint) is handled.

    The two distance() arguments are normalized so the field name comes first
    and the LatLng second, then the match value is parsed as the distance
    threshold.

    Raises:
      ExpressionTreeException: if the right hand side is not a number.
    """
    name, args = node.children
    if name.getText() == 'distance':
      x, y = args.children
      x, y = self._ResolveDistanceArg(x), self._ResolveDistanceArg(y)
      # NOTE: basestring implies this module targets Python 2.
      if isinstance(x, geo_util.LatLng) and isinstance(y, basestring):
        # Swap so that x is the field name and y the LatLng.
        x, y = y, x
      if isinstance(x, basestring) and isinstance(y, geo_util.LatLng):
        match_val = query_parser.GetQueryNodeText(match)
        try:
          distance = float(match_val)
        except ValueError:
          raise ExpressionTreeException('Unable to compare "%s()" with "%s"' %
                                        (name, match_val))
        matcher = DistanceMatcher(y, distance)
        return self._MatchGeoField(x, matcher, operator, document)
    return False
def _IsHasGlobalValue(self, node):
if node.getType() == QueryParser.HAS and len(node.children) == 2:
if (node.children[0].getType() == QueryParser.GLOBAL and
node.children[1].getType() == QueryParser.VALUE):
return True
return False
def _MatchGlobalPhrase(self, node, document):
"""Check if a document matches a parsed global phrase."""
if not all(self._IsHasGlobalValue(child) for child in node.children):
return False
value_nodes = (child.children[1] for child in node.children)
phrase_text = ' '.join(
(query_parser.GetQueryNodeText(node) for node in value_nodes))
for field in document.field_list():
if self._MatchRawPhraseWithRawAtom(field.value().string_value(),
phrase_text):
return True
return False
  def _CheckMatch(self, node, document):
    """Check if a document matches a query tree.
    Args:
      node: the query node to match
      document: the document to match
    Returns:
      True iff the query node matches the document.
    Raises:
      ExpressionTreeException: when != operator is used or numeric value is used
      in comparison for DATE field.
    """
    if node.getType() == QueryParser.SEQUENCE:
      # A sequence matches if every token matches, or as a global phrase.
      result = all(self._CheckMatch(child, document) for child in node.children)
      return result or self._MatchGlobalPhrase(node, document)
    if node.getType() == QueryParser.CONJUNCTION:
      return all(self._CheckMatch(child, document) for child in node.children)
    if node.getType() == QueryParser.DISJUNCTION:
      return any(self._CheckMatch(child, document) for child in node.children)
    if node.getType() == QueryParser.NEGATION:
      return not self._CheckMatch(node.children[0], document)
    if node.getType() == QueryParser.NE:
      raise ExpressionTreeException('!= comparison operator is not available')
    if node.getType() in query_parser.COMPARISON_TYPES:
      # Leaf comparison: left hand side is a global marker, a function call,
      # or a field name.
      lhs, match = node.children
      if lhs.getType() == QueryParser.GLOBAL:
        return self._MatchGlobal(match, document)
      elif lhs.getType() == QueryParser.FUNCTION:
        return self._MatchFunction(lhs, match, node.getType(), document)
      field_name = self._GetFieldName(lhs)
      if node.getType() in INEQUALITY_COMPARISON_TYPES:
        # Inequalities require a numeric or date right hand side.
        try:
          float(query_parser.GetPhraseQueryNodeText(match))
        except ValueError:
          self._CheckValidDateComparison(field_name, match)
      elif (self._IsValidDateValue(field_name) or
            self._IsValidNumericValue(field_name)):
        # A date/number in field-name position indicates a malformed query.
        raise ExpressionTreeException('Invalid field name "%s"' % field_name)
      return self._MatchAnyField(lhs, match, node.getType(), document)
    return False
  def Matches(self, document):
    """Return True iff document satisfies this matcher's parsed query."""
    return self._CheckMatch(self._query, document)
def FilterDocuments(self, documents):
return (doc for doc in documents if self.Matches(doc))
def _DateStrToDays(date_str):
  """Convert an ISO date string to days since the Unix epoch.

  EpochTime returns milliseconds, so dividing by MSEC_PER_DAY yields days
  (integer division under Python 2 — presumably intended; confirm if porting).
  """
  date = search_util.DeserializeDate(date_str)
  return search_util.EpochTime(date) / MSEC_PER_DAY
| |
from __future__ import absolute_import, print_function
import unittest
import tempfile
from bokeh.document import Document
from bokeh.model import Model
from bokeh.core.property_mixins import FillProps, LineProps, TextProps
from bokeh.core.properties import Int, String
from bokeh.themes import Theme
class ThemedModel(Model):
    """Test fixture model with two themable properties and known defaults."""
    number = Int(42)
    string = String("hello")
class SubOfThemedModel(ThemedModel):
    """Subclass fixture used to test theme inheritance and overriding."""
    another_string = String("world")
class TestThemes(unittest.TestCase):
    """Tests for bokeh Theme construction, validation, and application."""
    def test_construct_empty_theme_from_file(self):
        """An empty YAML file yields a theme that applies without error."""
        with (tempfile.NamedTemporaryFile()) as file:
            # create and apply empty theme with no exception thrown
            file.file.write("".encode('utf-8'))
            file.file.flush()
            theme = Theme(filename=file.name)
            theme.apply_to_model(ThemedModel())
    def test_construct_empty_theme_from_json(self):
        """An empty JSON dict yields a theme that applies without error."""
        # create and apply empty theme with no exception thrown
        theme = Theme(json=dict())
        theme.apply_to_model(ThemedModel())
    def test_construct_no_json_or_filename(self):
        """Theme() with neither json nor filename raises ValueError."""
        with self.assertRaises(ValueError) as manager:
            Theme()
        self.assertTrue("requires json or a filename" in repr(manager.exception))
    def test_construct_json_and_filename(self):
        """Theme() with both json and filename raises ValueError."""
        with self.assertRaises(ValueError) as manager:
            # we check "" and {} as falsey values, to try to trick
            # our code into thinking they weren't provided.
            Theme(filename="", json={})
        self.assertTrue("not both" in repr(manager.exception))
    def test_construct_bad_attrs(self):
        """A non-dict 'attrs' value is rejected with a descriptive error."""
        with self.assertRaises(ValueError) as manager:
            Theme(json=dict(attrs=42))
        self.assertTrue("should be a dictionary of class names" in repr(manager.exception))
    def test_construct_bad_class_props(self):
        """A non-dict per-class properties value is rejected."""
        with self.assertRaises(ValueError) as manager:
            Theme(json=dict(attrs=dict(SomeClass=42)))
        self.assertTrue("should be a dictionary of properties" in repr(manager.exception))
    def test_construct_nonempty_theme_from_file(self):
        """A YAML file's attrs are parsed and merged down the class hierarchy."""
        with (tempfile.NamedTemporaryFile()) as file:
            # create and apply empty theme with no exception thrown
            file.file.write("""
            attrs:
                ThemedModel:
                    number: 57
                SubOfThemedModel:
                    another_string: "boo"
            """.encode('utf-8'))
            file.file.flush()
            theme = Theme(filename=file.name)
            self.assertDictEqual(dict(number=57), theme._for_class(ThemedModel))
            self.assertDictEqual(dict(number=57, another_string="boo"), theme._for_class(SubOfThemedModel))
    def test_theming_a_model(self):
        """Applying a theme changes the themed property and fires on_change."""
        theme = Theme(json={
            'attrs' : {
                'ThemedModel' : {
                    'string' : 'w00t'
                }
            }
        })
        obj = ThemedModel()
        changes = dict(calls=[])
        self.assertEqual('hello', obj.string)
        def record_trigger(attr, old, new_):
            changes['calls'].append((attr, old, new_))
        obj.on_change('string', record_trigger)
        theme.apply_to_model(obj)
        self.assertEqual('w00t', obj.string)
        self.assertEqual([('string', 'hello', 'w00t')], changes['calls'])
    def test_theming_a_model_via_base(self):
        """A theme keyed on a base class applies to subclass instances."""
        theme = Theme(json={
            'attrs' : {
                'ThemedModel' : {
                    'string' : 'w00t'
                }
            }
        })
        obj = SubOfThemedModel()
        changes = dict(calls=[])
        def record_trigger(attr, old, new_):
            changes['calls'].append((attr, old, new_))
        obj.on_change('string', record_trigger)
        self.assertEqual('hello', obj.string)
        theme.apply_to_model(obj)
        self.assertEqual('w00t', obj.string)
        self.assertEqual([('string', 'hello', 'w00t')], changes['calls'])
    def test_subclass_theme_used_rather_than_base(self):
        """When both classes are themed, the subclass entry wins."""
        theme = Theme(json={
            'attrs' : {
                'ThemedModel' : {
                    'string' : 'w00t'
                },
                'SubOfThemedModel' : {
                    'string' : 'bar'
                }
            }
        })
        obj = SubOfThemedModel()
        self.assertEqual('hello', obj.string)
        changes = dict(calls=[])
        def record_trigger(attr, old, new_):
            changes['calls'].append((attr, old, new_))
        obj.on_change('string', record_trigger)
        theme.apply_to_model(obj)
        self.assertEqual('bar', obj.string)
        self.assertEqual([('string', 'hello', 'bar')], changes['calls'])
    def test_theming_a_document_after_adding_root(self):
        """Setting doc.theme themes existing roots; removal restores defaults."""
        theme = Theme(json={
            'attrs' : {
                'ThemedModel' : {
                    'string' : 'w00t'
                }
            }
        })
        obj = ThemedModel()
        doc = Document()
        doc.add_root(obj)
        self.assertEqual('hello', obj.string)
        changes = dict(calls=[])
        def record_trigger(attr, old, new_):
            changes['calls'].append((attr, old, new_))
        obj.on_change('string', record_trigger)
        doc.theme = theme
        self.assertIs(doc.theme, theme)
        self.assertEqual('w00t', obj.string)
        doc.remove_root(obj)
        self.assertEqual('hello', obj.string)
        self.assertEqual([('string', 'hello', 'w00t'),
                          ('string', 'w00t', 'hello')], changes['calls'])
    def test_theming_a_document_before_adding_root(self):
        """Roots added after doc.theme is set pick up the theme on add."""
        theme = Theme(json={
            'attrs' : {
                'ThemedModel' : {
                    'string' : 'w00t'
                }
            }
        })
        obj = ThemedModel()
        doc = Document()
        self.assertEqual('hello', obj.string)
        doc.theme = theme
        self.assertIs(doc.theme, theme)
        changes = dict(calls=[])
        def record_trigger(attr, old, new_):
            changes['calls'].append((attr, old, new_))
        obj.on_change('string', record_trigger)
        doc.add_root(obj)
        self.assertEqual('w00t', obj.string)
        doc.remove_root(obj)
        self.assertEqual('hello', obj.string)
        self.assertEqual([('string', 'hello', 'w00t'),
                          ('string', 'w00t', 'hello')], changes['calls'])
    def test_setting_document_theme_to_none(self):
        """Assigning None restores the default theme (doc.theme is never None)."""
        theme = Theme(json={
            'attrs' : {
                'ThemedModel' : {
                    'string' : 'w00t'
                }
            }
        })
        obj = ThemedModel()
        doc = Document()
        doc.add_root(obj)
        changes = dict(calls=[])
        def record_trigger(attr, old, new_):
            changes['calls'].append((attr, old, new_))
        obj.on_change('string', record_trigger)
        doc.theme = theme
        self.assertEqual('w00t', obj.string)
        # setting to None reverts to default theme
        doc.theme = None
        self.assertIsNot(doc.theme, None)
        self.assertEqual('hello', obj.string)
        self.assertEqual([('string', 'hello', 'w00t'),
                          ('string', 'w00t', 'hello')], changes['calls'])
    def _compare_dict_to_model_class_defaults(self, props, model_class):
        """Print whether each themed prop differs from the class default."""
        model = model_class()
        for name, value in props.items():
            property = model.lookup(name)
            if property is None:
                raise RuntimeError("Model %r has no property %s" % (model, name))
            default = property.class_default(model_class)
            if default != value:
                print("%s.%s differs default %r theme %r" % (model_class.__name__, name, default, value))
            else:
                print("%s.%s default %r is identical in the theme" % (model_class.__name__, name, default))
    def _compare_dict_to_model_defaults(self, props, model_name):
        """Resolve model_name in bokeh.models or bokeh.models.widgets and compare."""
        import bokeh.models as models
        import bokeh.models.widgets as widgets
        if hasattr(models, model_name):
            self._compare_dict_to_model_class_defaults(props, getattr(models, model_name))
        elif hasattr(widgets, model_name):
            self._compare_dict_to_model_class_defaults(props, getattr(widgets, model_name))
        else:
            raise RuntimeError("Could not find class for " + model_name)
    def test_default_theme_is_empty(self):
        """The built-in default theme must not override any model defaults."""
        # this is kind of a silly test once we fix default.yaml to be empty :-)
        # the point is to list all the things to fix.
        doc = Document()
        # before each assertion, we print out why it's going to fail.
        for class_name, props in doc.theme._json['attrs'].items():
            self._compare_dict_to_model_defaults(props, class_name)
        self.assertEqual(0, len(doc.theme._json['attrs']))
        self._compare_dict_to_model_class_defaults(doc.theme._fill_defaults, FillProps)
        self.assertEqual(0, len(doc.theme._fill_defaults))
        self._compare_dict_to_model_class_defaults(doc.theme._text_defaults, TextProps)
        self.assertEqual(0, len(doc.theme._text_defaults))
        self._compare_dict_to_model_class_defaults(doc.theme._line_defaults, LineProps)
        self.assertEqual(0, len(doc.theme._line_defaults))
| |
'''
Created on Feb 1, 2019
@author: Mark V Systems Limited
(c) Copyright 2019 Mark V Systems Limited, All rights reserved.
This plugin allows GUI and command line users to test transforms.
Custom extensions are also made available when their plugin has been loaded
(e.g., for SEC custom transforms use plugin transforms/SEC, validate/EFM or EdgarRenderer)
Errors are shown both in results field as well as reported in the message pane (GUI) and reported in log (command line).
For GUI operation tools -> transformation tester:
Select registry (ixt v1-4, ixt-sec)
Select or enter transform
Enter source text
Press "transform"
Result (or error code)
For command line operation:
arelleCmdLine --plugins transforms/tester --testTransform 'registry name transformation name pattern' (space separated)
note: the transform name may be optionally prefixed
results or errors are in the log
arelleCmdLine --plugins transforms/tester --testTransform 'ixt v3 datedaymonthen 29th February' or
arelleCmdLine --plugins transforms/tester --testTransform 'ixt v3 ixt:datedaymonthen 29th February'
SEC transform example:
arelleCmdLine --plugins 'transforms/tester|transforms/SEC' --testTransform "ixt-sec durwordsen 23 days"
Help instructions and list of available transforms:
arelleCmdLine --plugins transforms/tester --testTransform 'help' (or '?') or
arelleCmdLine --plugins 'transforms/tester|transforms/SEC' --testTransform 'help'
For REST API operation:
web browser: http://localhost:8080/rest/xbrl/validation?plugins=transforms/tester&testTransform=ixt v3 datedaymonthen 29th February
cmd line: curl 'http://localhost:8080/rest/xbrl/validation?plugins=transforms/tester&testTransform=ixt%20v3%20datedaymonthen%2029th%20February'
'''
import os, re, logging
from optparse import SUPPRESS_HELP
from arelle.FunctionIxt import ixtNamespaces, ixtNamespaceFunctions
from arelle.ModelFormulaObject import Trace
from arelle.XmlUtil import setXmlns
from arelle import ModelDocument, ModelXbrl, ValidateXbrl, XbrlConst, XPathParser, XPathContext
class TransformTester:
    """Harness that evaluates transformation-registry functions via XPath.

    Builds a minimal in-memory model document so transform functions can be
    parsed and executed outside of a real XBRL filing.
    """
    def __init__(self, cntlr, isCmdLine=False):
        # NOTE(review): isCmdLine is currently unused here; retained for
        # caller compatibility.
        self.cntlr = cntlr
        # setup tester
        xml = "<rootElement/>"
        self.modelXbrl = ModelXbrl.create(cntlr.modelManager, ModelDocument.Type.UnknownNonXML, initialXml=xml, isEntry=True)
        self.validator = ValidateXbrl.ValidateXbrl(self.modelXbrl)
        self.validator.validate(self.modelXbrl) # required to set up
        cntlr.showStatus(_("Initializing Formula Grammar"))
        XPathParser.initializeParser(cntlr.modelManager)
        cntlr.showStatus(None)
        # Registry choices: built-in ixt registries plus any custom transforms
        # contributed by loaded plugins.
        self.trRegs = sorted(ixtNamespaces.keys())
        self.trPrefixNSs = dict((qn.prefix, qn.namespaceURI)
                                for qn in self.modelXbrl.modelManager.customTransforms.keys())
        self.trRegs.extend(sorted(self.trPrefixNSs.keys()))
        self.trPrefixNSs.update(ixtNamespaces)
    def getTrNames(self, trReg):
        """Return the sorted, prefixed transform names for registry trReg."""
        trNS = self.trPrefixNSs[trReg]
        trPrefix = trReg.split()[0] # for ixt remove TRn part
        setXmlns(self.modelXbrl.modelDocument, trPrefix, trNS)
        if trNS in ixtNamespaceFunctions:
            return sorted("{}:{}".format(trPrefix, key)
                          for key in ixtNamespaceFunctions[trNS].keys())
        # custom transforms
        return sorted(str(trQn)
                      for trQn in self.modelXbrl.modelManager.customTransforms.keys()
                      if trQn.prefix == trPrefix)
    def transform(self, trReg, trName, sourceValue):
        """Apply transform trName of registry trReg to sourceValue.

        Returns the (de-sequenced) transform result, or the XPathException
        on evaluation error (which is also reported on the model).
        """
        try:
            trNS = self.trPrefixNSs[trReg]
            trPrefix = trReg.split()[0] # for ixt remove TRn part
            setXmlns(self.modelXbrl.modelDocument, trPrefix, trNS)
            self.modelXbrl.modelManager.showStatus(_("Executing call"))
            elt = self.modelXbrl.modelDocument.xmlRootElement
            if ':' in trName:
                prefixedFnName = trName
            else:
                prefixedFnName = "{}:{}".format(trPrefix, trName)
            # Build and evaluate an XPath call of the form prefix:name("source")
            callExprStack = XPathParser.parse(self.validator,
                                              '{}("{}")'.format(prefixedFnName, sourceValue),
                                              elt, trName + " call", Trace.CALL)
            xpathContext = XPathContext.create(self.modelXbrl, sourceElement=elt)
            result = xpathContext.evaluate(callExprStack)
            while result and isinstance(result, (tuple,list,set)):
                result = next(iter(result)) # de-sequence result
            return result
        except XPathContext.XPathException as err:
            self.modelXbrl.error(err.code, err.message)
            return err
def cmdLineOptionExtender(parser, *args, **kwargs):
    """Add the --testTransform option to arelle's command line parser.

    Args:
        parser: the optparse-style parser arelle passes to option extenders.
    """
    # Fix: the second example previously lacked its opening quote.
    parser.add_option("--testTransform",
                      action="store",
                      dest="testTransform",
                      help=_("Test a transformation registry transform. "
                             "Enter 'help' or '?' for a list of transformation registries available. "
                             "Enter registry name, space, transformation name, space and pattern. "
                             "E.g., 'ixt v3 datedaymonthen 29th February' or 'ixt v3 ixt:datedaymonthen 29th February'. "))
def cmdLineRun(cntlr, options, *args, **kwargs):
    """Command line entry point: run --testTransform if it was specified.

    The argument format is 'registry transformName sourceText'; 'help' or '?'
    lists the available registries instead. Results and errors are reported
    through the model's logging, so they also appear in REST API responses.
    """
    if options.testTransform:
        tester = TransformTester(cntlr)
        arg = options.testTransform
        argWord, _sep, rest = arg.partition(" ")
        trReg = None
        # Registry names may themselves contain spaces (e.g. "ixt v3"), so
        # match the whole argument against each known registry prefix.
        for _regName in tester.trPrefixNSs.keys():
            if arg.startswith(_regName):
                trReg = _regName
                rest = arg[len(trReg)+1:]
        if trReg is None or argWord in ("help", "?"):
            cntlr.addToLog("Registries available: {}".format(", ".join(tester.trRegs)),
                           messageCode="tester:registries", level=logging.INFO)
        else:
            trName, _sep, sourceValue = rest.partition(" ")
            # Fix: removed leftover debug print() that wrote to stdout instead
            # of the log (the result is already reported via modelXbrl.info).
            result = tester.transform(trReg, trName, sourceValue)
            tester.modelXbrl.info("tester:transformation",
                                  "%(registry)s %(transformName)s source '%(sourceValue)s' result '%(resultValue)s'",
                                  registry=trReg, transformName=trName, sourceValue=sourceValue, resultValue=str(result))
        tester.modelXbrl.close()
def transformationTesterMenuExtender(cntlr, menu, *args, **kwargs):
    """Add the Tools -> Transformation Tester menu entry (GUI mode only).

    tkinter is imported lazily here because it is unavailable in command line
    and web server modes.
    """
    from tkinter import Toplevel, StringVar, N, S, E, EW, W
    try:
        from tkinter.ttk import Frame, Button, Entry
    except ImportError:
        # Fix: the Python 2 fallback previously omitted Entry, causing a
        # NameError when the dialog built its entry widgets.
        from ttk import Frame, Button, Entry
    from arelle.UiUtil import gridCombobox, label
    from arelle.CntlrWinTooltip import ToolTip
    class DialogTransformTester(Toplevel):
        """Modal dialog to pick a registry/transform and run it on source text."""
        def __init__(self, tester):
            self.tester = tester
            self.mainWin = tester.cntlr
            parent = self.mainWin.parent
            super(DialogTransformTester, self).__init__(parent)
            self.parent = parent
            # Position this dialog relative to the main window's geometry.
            parentGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", parent.geometry())
            dialogX = int(parentGeometry.group(3))
            dialogY = int(parentGeometry.group(4))
            self.selectedGroup = None
            self.transient(self.parent)
            self.title(_("Transformation Tester"))
            frame = Frame(self)
            # registry selection row
            trRegLabel = label(frame, 0, 0, _("Registry:"))
            trReg = self.tester.trRegs[-1] # default is latest
            self.trRegName = gridCombobox(frame, 1, 0,
                                          value=trReg,
                                          values=self.tester.trRegs,
                                          comboboxselected=self.dialogTrRegComboBoxSelected)
            trRegToolTipMessage = _("Select Transformation Registry")
            ToolTip(self.trRegName, text=trRegToolTipMessage, wraplength=360)
            ToolTip(trRegLabel, text=trRegToolTipMessage, wraplength=360)
            # transform selection row
            trNameLabel = label(frame, 0, 1, _("Transform:"))
            self.trNameName = gridCombobox(frame, 1, 1,
                                           value="",
                                           values=self.tester.getTrNames(trReg),
                                           comboboxselected=self.dialogTrNameComboBoxSelected)
            # Fix: these tooltips were attached to the registry widgets
            # (copy-paste); they describe the transform widgets.
            trNameToolTipMessage = _("Select or enter transform")
            ToolTip(self.trNameName, text=trNameToolTipMessage, wraplength=360)
            ToolTip(trNameLabel, text=trNameToolTipMessage, wraplength=360)
            # source text entry row
            sourceLabel = label(frame, 0, 2, _("Source text:"))
            ToolTip(sourceLabel, text=_("Enter the source text which is to be transformed. "), wraplength=240)
            self.sourceVar = StringVar()
            self.sourceVar.set("")
            sourceEntry = Entry(frame, textvariable=self.sourceVar, width=50)
            sourceLabel.grid(row=2, column=0, sticky=W)
            sourceEntry.grid(row=2, column=1, sticky=EW, pady=3, padx=3)
            # result display row
            resultLabel = label(frame, 1, 3, _("Result:"))
            # Fix: this tooltip was attached to sourceLabel (copy-paste).
            ToolTip(resultLabel, text=_("Transformation result. "), wraplength=240)
            self.resultVar = StringVar()
            self.resultVar.set("")
            resultEntry = Entry(frame, textvariable=self.resultVar, width=50)
            resultLabel.grid(row=3, column=0, sticky=W)
            resultEntry.grid(row=3, column=1, sticky=EW, pady=3, padx=3)
            self.mainWin.showStatus(None)
            btnPad = 2 if self.mainWin.isMSW else 0 # buttons too narrow on windows
            okButton = Button(frame, text=_("Transform"), width=8 + btnPad, command=self.dialogOk)
            cancelButton = Button(frame, text=_("Done"), width=4 + btnPad, command=self.dialogClose)
            cancelButton.grid(row=4, column=0, sticky=E, columnspan=2, pady=3, padx=3)
            okButton.grid(row=4, column=0, sticky=E, columnspan=2, pady=3, padx=64)
            ToolTip(okButton, text=_("Transform the source entered. "), wraplength=240)
            ToolTip(cancelButton, text=_("Close this dialog. "), wraplength=240)
            frame.grid(row=0, column=0, sticky=(N,S,E,W))
            frame.columnconfigure(1, weight=3)
            frame.columnconfigure(2, weight=1)
            window = self.winfo_toplevel()
            window.columnconfigure(0, weight=1)
            self.geometry("+{0}+{1}".format(dialogX+150,dialogY+100))
            #self.bind("<Return>", self.ok)
            #self.bind("<Escape>", self.close)
            self.protocol("WM_DELETE_WINDOW", self.dialogClose)
            self.grab_set()
            self.wait_window(self)
        def dialogOk(self, event=None):
            """Run the selected transform and show the result (or the error)."""
            result = self.tester.transform(
                self.trRegName.get(),
                self.trNameName.value,
                self.sourceVar.get())
            # Fix: both branches of the former isinstance check were
            # identical; XPathException results stringify the same way.
            self.resultVar.set(str(result))
        def dialogClose(self, event=None):
            """Release the model and dismiss the dialog."""
            self.tester.modelXbrl.close()
            self.parent.focus_set()
            self.destroy()
        def dialogTrRegComboBoxSelected(self, *args):
            # repopulate transform names when the registry changes
            self.trNameName["values"] = self.tester.getTrNames( self.trRegName.get() )
        def dialogTrNameComboBoxSelected(self, *args):
            pass
    def guiTransformationTester():
        """Create a tester, run the dialog modally, then release the model."""
        tester = TransformTester(cntlr)
        DialogTransformTester( tester )
        tester.modelXbrl.close()
    # Fix: label typo "Transformtion" -> "Transformation".
    menu.add_command(label="Transformation Tester",
                     underline=0,
                     command=guiTransformationTester)
# Plugin registration: arelle discovers entry points through this dict.
__pluginInfo__ = {
    # Do not use _( ) in pluginInfo itself (it is applied later, after loading
    'name': 'Transformation tester',
    'version': '1.0',
    'description': '''Transformation Tester''',
    'license': 'Apache-2',
    'author': 'Mark V Systems',
    'copyright': '(c) Copyright 2019 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    # adds the --testTransform command line option
    'CntlrCmdLine.Options': cmdLineOptionExtender,
    # executes --testTransform during a command line / REST run
    'CntlrCmdLine.Utility.Run': cmdLineRun,
    # adds the Tools menu entry in GUI mode
    'CntlrWinMain.Menu.Tools': transformationTesterMenuExtender
}
| |
#!/usr/bin/env python
"""
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue
import axis_ep
# Name of the Verilog module under test.
module = 'axis_srl_register'
# Verilog sources: the RTL module plus its cosimulation testbench wrapper.
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s.v" % module)
src = ' '.join(srcs)
# Icarus Verilog command line that builds the vvp simulation image.
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
def dut_axis_srl_register(clk,
                          rst,
                          current_test,
                          input_axis_tdata,
                          input_axis_tvalid,
                          input_axis_tready,
                          input_axis_tlast,
                          input_axis_tuser,
                          output_axis_tdata,
                          output_axis_tvalid,
                          output_axis_tready,
                          output_axis_tlast,
                          output_axis_tuser):
    """Build the Verilog sources and return a myhdl Cosimulation of the DUT.

    Raises:
        Exception: if the iverilog build command fails.
    """
    if os.system(build_cmd):
        raise Exception("Error running build command")
    # Connect each testbench signal to its like-named port in the cosim.
    ports = dict(
        clk=clk,
        rst=rst,
        current_test=current_test,
        input_axis_tdata=input_axis_tdata,
        input_axis_tvalid=input_axis_tvalid,
        input_axis_tready=input_axis_tready,
        input_axis_tlast=input_axis_tlast,
        input_axis_tuser=input_axis_tuser,
        output_axis_tdata=output_axis_tdata,
        output_axis_tvalid=output_axis_tvalid,
        output_axis_tready=output_axis_tready,
        output_axis_tlast=output_axis_tlast,
        output_axis_tuser=output_axis_tuser,
    )
    return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module, **ports)
def bench():
    """Testbench generator for the axis_srl_register DUT.

    Builds the AXI-Stream source/sink endpoints and the cosimulated DUT, then
    drives seven test scenarios (basic packet, long packet, pauses,
    back-to-back packets, alternating source/sink pauses, tuser pass-through).
    Returns the generator instances for myhdl Simulation().
    """
    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])
    input_axis_tdata = Signal(intbv(0)[8:])
    input_axis_tvalid = Signal(bool(0))
    input_axis_tlast = Signal(bool(0))
    input_axis_tuser = Signal(bool(0))
    output_axis_tready = Signal(bool(0))
    # Outputs
    input_axis_tready = Signal(bool(0))
    output_axis_tdata = Signal(intbv(0)[8:])
    output_axis_tvalid = Signal(bool(0))
    output_axis_tlast = Signal(bool(0))
    output_axis_tuser = Signal(bool(0))
    # sources and sinks
    source_queue = Queue()
    source_pause = Signal(bool(0))
    sink_queue = Queue()
    sink_pause = Signal(bool(0))
    source = axis_ep.AXIStreamSource(clk,
                                     rst,
                                     tdata=input_axis_tdata,
                                     tvalid=input_axis_tvalid,
                                     tready=input_axis_tready,
                                     tlast=input_axis_tlast,
                                     tuser=input_axis_tuser,
                                     fifo=source_queue,
                                     pause=source_pause,
                                     name='source')
    sink = axis_ep.AXIStreamSink(clk,
                                 rst,
                                 tdata=output_axis_tdata,
                                 tvalid=output_axis_tvalid,
                                 tready=output_axis_tready,
                                 tlast=output_axis_tlast,
                                 tuser=output_axis_tuser,
                                 fifo=sink_queue,
                                 pause=sink_pause,
                                 name='sink')
    # DUT
    dut = dut_axis_srl_register(clk,
                                rst,
                                current_test,
                                input_axis_tdata,
                                input_axis_tvalid,
                                input_axis_tready,
                                input_axis_tlast,
                                input_axis_tuser,
                                output_axis_tdata,
                                output_axis_tvalid,
                                output_axis_tready,
                                output_axis_tlast,
                                output_axis_tuser)
    # free-running clock, 8 time units per period
    @always(delay(4))
    def clkgen():
        clk.next = not clk
    # stimulus / checker process
    @instance
    def check():
        # reset pulse after initial settling time
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        print("test 1: test packet")
        current_test.next = 1
        test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                            b'\x5A\x51\x52\x53\x54\x55' +
                                            b'\x80\x00' +
                                            b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        source_queue.put(test_frame)
        yield clk.posedge
        yield output_axis_tlast.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame
        yield delay(100)
        yield clk.posedge
        print("test 2: longer packet")
        current_test.next = 2
        test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                            b'\x5A\x51\x52\x53\x54\x55' +
                                            b'\x80\x00' +
                                            bytearray(range(256)))
        source_queue.put(test_frame)
        yield clk.posedge
        yield output_axis_tlast.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame
        yield clk.posedge
        print("test 3: test packet with pauses")
        current_test.next = 3
        test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                            b'\x5A\x51\x52\x53\x54\x55' +
                                            b'\x80\x00' +
                                            b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        source_queue.put(test_frame)
        yield clk.posedge
        # pause the source mid-frame, then the sink, to exercise backpressure
        yield delay(64)
        yield clk.posedge
        source_pause.next = True
        yield delay(32)
        yield clk.posedge
        source_pause.next = False
        yield delay(64)
        yield clk.posedge
        sink_pause.next = True
        yield delay(32)
        yield clk.posedge
        sink_pause.next = False
        yield output_axis_tlast.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame
        yield delay(100)
        yield clk.posedge
        print("test 4: back-to-back packets")
        current_test.next = 4
        test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                             b'\x5A\x51\x52\x53\x54\x55' +
                                             b'\x80\x00' +
                                             b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                             b'\x5A\x51\x52\x53\x54\x55' +
                                             b'\x80\x00' +
                                             b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        source_queue.put(test_frame1)
        source_queue.put(test_frame2)
        yield clk.posedge
        yield output_axis_tlast.posedge
        yield clk.posedge
        yield output_axis_tlast.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame1
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame2
        yield delay(100)
        yield clk.posedge
        print("test 5: alternate pause source")
        current_test.next = 5
        test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                             b'\x5A\x51\x52\x53\x54\x55' +
                                             b'\x80\x00' +
                                             b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                             b'\x5A\x51\x52\x53\x54\x55' +
                                             b'\x80\x00' +
                                             b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        source_queue.put(test_frame1)
        source_queue.put(test_frame2)
        yield clk.posedge
        # toggle the source pause every few cycles until both frames drain
        while input_axis_tvalid or output_axis_tvalid:
            source_pause.next = True
            yield clk.posedge
            yield clk.posedge
            yield clk.posedge
            source_pause.next = False
            yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame1
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame2
        yield delay(100)
        yield clk.posedge
        print("test 6: alternate pause sink")
        current_test.next = 6
        test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                             b'\x5A\x51\x52\x53\x54\x55' +
                                             b'\x80\x00' +
                                             b'\x01\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                             b'\x5A\x51\x52\x53\x54\x55' +
                                             b'\x80\x00' +
                                             b'\x02\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        source_queue.put(test_frame1)
        source_queue.put(test_frame2)
        yield clk.posedge
        # toggle the sink pause every few cycles until both frames drain
        while input_axis_tvalid or output_axis_tvalid:
            sink_pause.next = True
            yield clk.posedge
            yield clk.posedge
            yield clk.posedge
            sink_pause.next = False
            yield clk.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame1
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame2
        yield delay(100)
        yield clk.posedge
        print("test 7: tuser assert")
        current_test.next = 7
        test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
                                            b'\x5A\x51\x52\x53\x54\x55' +
                                            b'\x80\x00' +
                                            b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10')
        test_frame.user = 1
        source_queue.put(test_frame)
        yield clk.posedge
        yield output_axis_tlast.posedge
        yield clk.posedge
        yield clk.posedge
        rx_frame = None
        if not sink_queue.empty():
            rx_frame = sink_queue.get()
        assert rx_frame == test_frame
        # tuser must be asserted on the final beat of the received frame
        assert rx_frame.user[-1]
        yield delay(100)
        raise StopSimulation
    return dut, source, sink, clkgen, check
def test_bench():
    """Run the MyHDL simulation for this testbench.

    Changes into this module's directory first so that any HDL sources
    referenced with relative paths resolve correctly.
    """
    module_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(module_dir)
    Simulation(bench()).run()
# Allow the testbench to be run directly as a script.
if __name__ == '__main__':
    print("Running test...")
    test_bench()
# (file-concatenation artifact removed)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        network_interface_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so the
        # polling method below does not receive them again.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE has no response body; only invoke the caller-supplied hook, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a
        # caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_interface_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterface"
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
parameters, # type: "_models.NetworkInterface"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterface"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        network_interface_name, # type: str
        parameters, # type: "_models.NetworkInterface"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.NetworkInterface"]
        """Creates or updates a network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param parameters: Parameters supplied to the create or update network interface operation.
        :type parameters: ~azure.mgmt.network.v2019_11_01.models.NetworkInterface
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either NetworkInterface or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.NetworkInterface]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so the
        # polling method below does not receive them again.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the NetworkInterface model.
            deserialized = self._deserialize('NetworkInterface', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a
        # caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_interface_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterface"
"""Updates a network interface tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to update network interface tags.
:type parameters: ~azure.mgmt.network.v2019_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def list_all(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page request (from the URL template) or a
            # follow-up request against the service-provided next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the full query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, iterator of items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, surfacing ARM errors as exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the first-page request (from the URL template) or a
            # follow-up request against the service-provided next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the full query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, iterator of items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, surfacing ARM errors as exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
def _get_effective_route_table_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.EffectiveRouteListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._get_effective_route_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
    def begin_get_effective_route_table(
        self,
        resource_group_name, # type: str
        network_interface_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.EffectiveRouteListResult"]
        """Gets all route tables applied to a network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either EffectiveRouteListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.EffectiveRouteListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveRouteListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._get_effective_route_table_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so the
        # polling method below does not receive them again.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the result model.
            deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a
        # caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
def _list_effective_network_security_groups_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.EffectiveNetworkSecurityGroupListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.EffectiveNetworkSecurityGroupListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._list_effective_network_security_groups_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
    def begin_list_effective_network_security_groups(
        self,
        resource_group_name, # type: str
        network_interface_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.EffectiveNetworkSecurityGroupListResult"]
        """Gets all network security groups applied to a network interface.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.EffectiveNetworkSecurityGroupListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.EffectiveNetworkSecurityGroupListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._list_effective_network_security_groups_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so the
        # polling method below does not receive them again.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the result model.
            deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM polling; False -> no polling; otherwise a
        # caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
    def list_virtual_machine_scale_set_vm_network_interfaces(
        self,
        resource_group_name, # type: str
        virtual_machine_scale_set_name, # type: str
        virtualmachine_index, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets information about all network interfaces in a virtual machine in a virtual machine scale
        set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # NOTE: this VMSS operation is pinned to an older api-version than the
        # v2019_11_01 package default.
        api_version = "2018-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build one page's GET request: the first page comes from the metadata
            # URL template; follow-up pages reuse the service-provided next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string; send it verbatim.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Turn one deserialized page into (next_link, iterator over items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'} # type: ignore
    def list_virtual_machine_scale_set_network_interfaces(
        self,
        resource_group_name, # type: str
        virtual_machine_scale_set_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a virtual machine scale set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # NOTE: this VMSS operation is pinned to an older api-version than the
        # v2019_11_01 package default.
        api_version = "2018-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build one page's GET request: first page from the URL template,
            # subsequent pages from the service-provided next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string; send it verbatim.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Turn one deserialized page into (next_link, iterator over items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'} # type: ignore
def get_virtual_machine_scale_set_network_interface(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterface"
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_network_interface.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def list_virtual_machine_scale_set_ip_configurations(
        self,
        resource_group_name, # type: str
        virtual_machine_scale_set_name, # type: str
        virtualmachine_index, # type: str
        network_interface_name, # type: str
        expand=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkInterfaceIPConfigurationListResult"]
        """Get the specified network interface ip configuration in a virtual machine scale set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: The virtual machine index.
        :type virtualmachine_index: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.NetworkInterfaceIPConfigurationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # NOTE: this VMSS operation is pinned to an older api-version than the
        # v2019_11_01 package default.
        api_version = "2018-10-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build one page's GET request: first page from the URL template
            # (with optional $expand), subsequent pages from next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                    'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Turn one deserialized page into (next_link, iterator over items).
            deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
def get_virtual_machine_scale_set_ip_configuration(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
ip_configuration_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkInterfaceIPConfiguration"
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
| |
'''
Info:
This script loads root files containing standard
b-tagging information + the IPMP outputs.
It turns the data into jet-flat structure,
replicates the variable creation and modification
that are present in MV2, scales the variables
and splits them into training and testing sets.
Finally, the data is stored as dictionaries in
HDF5 format. Parallelized using joblib.
Author:
Michela Paganini - Yale/CERN
michela.paganini@cern.ch
Example:
    python parallel_generate_data_DL1.py ./variables.yaml ../data/final_production/*.root
'''
import glob
import pandas as pd
import numpy as np
import math
import os
import sys
import logging
import yaml
import deepdish.io as io
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# -- custom utility functions defined in this folder
from utils import configure_logging
from data_utils import replaceInfNaN, apply_calojet_cuts, reweight_to_l
def main(yaml_file, root_paths, model_id):
    '''
    Process each input root file in parallel and report total event counts.
    Args:
    -----
        yaml_file: path to the yaml file listing branch/variable names
        root_paths: list of strings with the root file paths
        model_id: token used to tag the output hdf5 file names
    Returns:
    --------
        n_train_events: total number of events for training, across all root files
        n_test_events: total number of events for testing, across all root files
        n_validate_events: total number of events for validating, across all root files
    '''
    # -- logging
    configure_logging()
    logger = logging.getLogger("parallel_generate_data_DL1")
    logger.debug('Files to process: {}'.format(root_paths))
    # -- open and process files in parallel; each call returns a
    # (n_train, n_test, n_validate) tuple for one file
    from joblib import Parallel, delayed
    n_events = Parallel(n_jobs=-1, verbose=5, backend="multiprocessing") \
        (delayed(process)(i, filepath, yaml_file, model_id) for i, filepath in enumerate(root_paths))
    # -- add up events per type across all files.
    # NOTE: the zip() result is consumed inside a comprehension instead of being
    # indexed (`zip(...)[0]`), so this works on Python 3 too, where zip is lazy.
    n_train_events, n_test_events, n_validate_events = [
        sum(counts) for counts in zip(*n_events)
    ]
    logger.info('There are {n_train_events} training events, {n_test_events} testing events,\
    and {n_validate_events} validating events'.format(
        n_train_events=n_train_events,
        n_test_events=n_test_events,
        n_validate_events=n_validate_events
        )
    )
    return n_train_events, n_test_events, n_validate_events
# -----------------------------------------------------------------
def process(i, filepath, yaml_file, model_id):
    '''
    ETL one root file: load it, build MV2-style variables, flatten to a
    jet-flat structure, apply cuts, split into train/test/validate, scale,
    and save each split as an hdf5 dictionary.
    Args:
    -----
        i: integer index of this file, used to tag the output file names
        filepath: path to one input .root file
        yaml_file: path to the yaml file listing branch/variable names
        model_id: token identifying the model in the output file names
    Returns:
    --------
        (n_train, n_test, n_validate): event counts for this file
    '''
    import pandautils as pup
    # -- load branches from yaml file
    branches, training_vars, ip3d_training_vars, ipmp_training_vars = set_features(yaml_file)
    logger = logging.getLogger("ETL Service")
    # -- load root file to dataframe
    logger.info('Operating on {}'.format(filepath))
    logger.info('Creating dataframe...')
    df = pup.root2panda(filepath, 'bTag_AntiKt4EMTopoJets', branches=branches)
    # -- create MV2 input quantities, set default values
    logger.info('Transforming variables...')
    df = transformVars(df)
    # -- flatten to jet-flat structure; the primary-vertex columns were only
    # needed by transformVars, so drop them before flattening
    logger.info('Flattening df...')
    df.drop(['PVx', 'PVy', 'PVz'], axis=1, inplace=True)
    df_flat = pd.DataFrame({k: pup.flatten(c) for k, c in df.iteritems()})
    del df
    # --apply standard cuts on AntiKT4EMTopoJets
    logger.info('Applying cuts...')
    df_flat = apply_calojet_cuts(df_flat)
    # -- create numpy arrays for ML: truth labels, MV2 benchmark score, pt,
    # and the per-tagger input variable groups
    logger.info('Creating X, y, w, mv2c10...')
    y = df_flat['jet_LabDr_HadF'].values
    mv2c10 = df_flat['jet_mv2c10'].values
    jet_pt = df_flat['jet_pt'].values
    ip3d_vars = df_flat[ip3d_training_vars].values
    ipmp_vars = df_flat[ipmp_training_vars].values
    # -- slice df by only keeping the training variables
    X = df_flat[training_vars].values
    # -- Find weights by reweighting to the light distribution
    pteta = df_flat[['jet_pt', 'abs(jet_eta)']].values
    w = reweight_to_l(pteta, y, pt_col=0, eta_col=1)
    del df_flat, pteta
    # -- shuffle data, split into train and test; ix tracks original indices
    # through the shuffles. First split: 60% train pool / 40% test.
    logger.info('Shuffling, splitting, scaling...')
    ix = np.array(range(len(y)))
    X_train, X_test,\
    y_train, y_test,\
    w_train, w_test,\
    ix_train, ix_test, \
    mv2c10_train, mv2c10_test,\
    jet_pt_train, jet_pt_test,\
    ip3d_vars_train, ip3d_vars_test,\
    ipmp_vars_train, ipmp_vars_test = train_test_split(
        X, y, w, ix, mv2c10, jet_pt, ip3d_vars, ipmp_vars, train_size=0.6
    )
    # -- scale inputs to 0 mean, 1 std. The single scaler instance is refit for
    # each variable group; each test set is transformed before the next fit, so
    # the fit/transform pairing is correct.
    # NOTE(review): the fitted scale parameters are not saved anywhere, so they
    # cannot be reapplied at inference time -- confirm this is intended.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    ip3d_vars_train = scaler.fit_transform(ip3d_vars_train)
    ip3d_vars_test = scaler.transform(ip3d_vars_test)
    ipmp_vars_train = scaler.fit_transform(ipmp_vars_train)
    ipmp_vars_test = scaler.transform(ipmp_vars_test)
    # -- split the previously selected training data into train and validate
    # (70/30 of the pool -> overall 42% train / 40% test / 18% validate)
    X_train, X_validate,\
    y_train, y_validate,\
    w_train, w_validate,\
    ix_train, ix_validate,\
    mv2c10_train, mv2c10_validate,\
    jet_pt_train, jet_pt_validate,\
    ip3d_vars_train, ip3d_vars_validate,\
    ipmp_vars_train, ipmp_vars_validate = train_test_split(
        X_train, y_train, w_train, ix_train, mv2c10_train, jet_pt_train, ip3d_vars_train, ipmp_vars_train, train_size=0.7
    )
    # -- assign train, test, validate data to dictionaries
    train = {
        'X' : X_train,
        'ip3d_vars': ip3d_vars_train,
        'ipmp_vars': ipmp_vars_train,
        'y' : y_train,
        'w' : w_train,
        'ix': ix_train,
        'mv2c10': mv2c10_train,
        'pt': jet_pt_train
    }
    test = {
        'X' : X_test,
        'ip3d_vars': ip3d_vars_test,
        'ipmp_vars': ipmp_vars_test,
        'y' : y_test,
        'w' : w_test,
        'ix': ix_test,
        'mv2c10': mv2c10_test,
        'pt': jet_pt_test
    }
    validate = {
        'X' : X_validate,
        'ip3d_vars': ip3d_vars_validate,
        'ipmp_vars': ipmp_vars_validate,
        'y' : y_validate,
        'w' : w_validate,
        'ix': ix_validate,
        'mv2c10': mv2c10_validate,
        'pt': jet_pt_validate
    }
    # -- save dictionaries to hdf5, one archive per split, tagged with the
    # model id and this file's index
    logger.info('Saving dictionaries to hdf5...')
    hdf5_train_path = os.path.join('..', 'data', 'DL1-' + model_id + str(i) +'-train-db.h5')
    hdf5_test_path = os.path.join('..', 'data', 'DL1-' + model_id + str(i) +'-test-db.h5')
    hdf5_validate_path = os.path.join('..', 'data', 'DL1-' + model_id + str(i) +'-validate-db.h5')
    io.save(hdf5_train_path, train)
    io.save(hdf5_test_path, test)
    io.save(hdf5_validate_path, validate)
    logger.debug('Saved hdf5 archives: {}, {}, {}'. format(hdf5_train_path, hdf5_test_path, hdf5_validate_path))
    return (y_train.shape[0], y_test.shape[0], y_validate.shape[0])
    #return (hdf5_train_path, hdf5_test_path, hdf5_validate_path)
# -----------------------------------------------------------------
def set_features(yaml_file):
    '''
    Info:
    -----
        Load names of branches to use from a yaml file
        This will contain 4 entries: 'branches', 'training_vars', 'ip3d_training_vars', 'ipmp_training_vars'
        - 'branches': list of names of the branches to directly extract from the TTree
        - 'training_vars': list of names of variables to always be used for learning
        - 'ip3d_training_vars': list of names of variables to be used for\
        learning only if we want to include the ip3d vars
        - 'ipmp_training_vars': list of names of variables to be used for\
        learning only if we want to include the ipmp vars
    Returns:
    --------
        tuple of (branches, training_vars, ip3d_training_vars, ipmp_training_vars) lists
    Raises:
    -------
        IOError: if the file cannot be opened
        yaml.YAMLError: if the file is not valid yaml
        KeyError: if one of the four required entries is missing
    '''
    with open(yaml_file, 'r') as stream:
        # safe_load only constructs plain Python objects (lists/dicts/strings),
        # which is all this config needs; yaml.load without an explicit Loader
        # can execute arbitrary constructors and is deprecated.
        # The original code caught YAMLError, printed it, and then crashed with
        # a confusing NameError on the unbound result -- let the parse error
        # propagate instead.
        s = yaml.safe_load(stream)
    return s['branches'], s['training_vars'], s['ip3d_training_vars'], s['ipmp_training_vars']
# -----------------------------------------------------------------
def transformVars(df):
    '''
    modifies the variables to create the ones that mv2 uses, inserts default values when needed, saves new variables
    in the dataframe
    Args:
    -----
        df: pandas dataframe containing all the interesting variables as extracted from the .root file
    Returns:
    --------
        modified mv2-compliant dataframe (modified in place and returned)
    '''
    from rootpy.vector import LorentzVector, Vector3
    import pandautils as pup
    # -- modify features and set default values
    df['abs(jet_eta)'] = abs(df['jet_eta'])
    # -- create new IPxD features
    # BUGFIX: the invalid-entry mask must be computed *before* pu is overwritten.
    # The original recomputed the mask from the already-modified pu on each line,
    # so pb/pc were defaulted with the wrong mask (ip2d) or never defaulted at
    # all (ip3d/ipmp below).
    for (pu, pb, pc) in zip(df['jet_ip2d_pu'], df['jet_ip2d_pb'], df['jet_ip2d_pc']):
        invalid = np.logical_or(pu >= 10, pu < -1)
        pu[invalid] = -1
        pb[invalid] = -1
        pc[invalid] = -1
    for (pu, pb, pc) in zip(df['jet_ip3d_pu'], df['jet_ip3d_pb'], df['jet_ip3d_pc']):
        invalid = pu >= 10
        pu[invalid] = -1
        pb[invalid] = -1
        pc[invalid] = -1
    # -- log-likelihood ratios; log of a non-positive ratio is replaced by -20
    df['jet_ip2'] = (df['jet_ip2d_pb'] / df['jet_ip2d_pu']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    df['jet_ip2_c'] = (df['jet_ip2d_pb'] / df['jet_ip2d_pc']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    df['jet_ip2_cu'] = (df['jet_ip2d_pc'] / df['jet_ip2d_pu']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    df['jet_ip3'] = (df['jet_ip3d_pb'] / df['jet_ip3d_pu']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    df['jet_ip3_c'] = (df['jet_ip3d_pb'] / df['jet_ip3d_pc']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    df['jet_ip3_cu'] = (df['jet_ip3d_pc'] / df['jet_ip3d_pu']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    # -- create new IPMP features (same mask hoisting as above)
    for (pu, pb, pc) in zip(df['jet_ipmp_pu'], df['jet_ipmp_pb'], df['jet_ipmp_pc']):
        invalid = pu >= 10
        pu[invalid] = -1
        pb[invalid] = -1
        pc[invalid] = -1
    df['jet_ip'] = (df['jet_ipmp_pb'] / df['jet_ipmp_pu']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    df['jet_ip_c'] = (df['jet_ipmp_pb'] / df['jet_ipmp_pc']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    df['jet_ip_cu'] = (df['jet_ipmp_pc'] / df['jet_ipmp_pu']).apply(lambda x : np.log( x )).apply(lambda x: replaceInfNaN(x, -20))
    # -- SV1 features: displacement of the secondary vertex from the primary vertex
    dx = df['jet_sv1_vtx_x']-df['PVx']
    dy = df['jet_sv1_vtx_y']-df['PVy']
    dz = df['jet_sv1_vtx_z']-df['PVz']
    v_jet = LorentzVector()
    pv2sv = Vector3()
    sv1_L3d = []
    sv1_Lxy = []
    dR = []
    for index, dxi in enumerate(dx): # loop thru events
        sv1_L3d_ev = []
        sv1L_ev = []
        dR_ev = []
        for jet in xrange(len(dxi)): # loop thru jets
            v_jet.SetPtEtaPhiM(df['jet_pt'][index][jet], df['jet_eta'][index][jet], df['jet_phi'][index][jet], df['jet_m'][index][jet])
            if (dxi[jet].size != 0):
                # 3D and transverse decay lengths, and dR between the jet axis
                # and the PV->SV direction
                sv1_L3d_ev.append(np.sqrt(pow(dx[index][jet], 2) + pow(dy[index][jet], 2) + pow(dz[index][jet], 2))[0])
                sv1L_ev.append(math.hypot(dx[index][jet], dy[index][jet]))
                pv2sv.SetXYZ(dx[index][jet], dy[index][jet], dz[index][jet])
                jetAxis = Vector3(v_jet.Px(), v_jet.Py(), v_jet.Pz())
                dR_ev.append(pv2sv.DeltaR(jetAxis))
            else:
                # no secondary vertex found -> sentinel defaults
                dR_ev.append(-1)
                sv1L_ev.append(-100)
                sv1_L3d_ev.append(-100)
        sv1_Lxy.append(sv1L_ev)
        dR.append(dR_ev)
        sv1_L3d.append(sv1_L3d_ev)
    df['jet_sv1_dR'] = dR
    df['jet_sv1_Lxy'] = sv1_Lxy
    df['jet_sv1_L3d'] = sv1_L3d
    # -- add more default values for sv1 variables when no vertex was found
    sv1_vtx_ok = pup.match_shape(np.asarray([len(el) for event in df['jet_sv1_vtx_x'] for el in event]), df['jet_pt'])
    for (ok4event, sv1_ntkv4event, sv1_n2t4event, sv1_mass4event, sv1_efrc4event, sv1_sig34event) in zip(sv1_vtx_ok, df['jet_sv1_ntrkv'], df['jet_sv1_n2t'], df['jet_sv1_m'], df['jet_sv1_efc'], df['jet_sv1_sig3d']):
        no_vtx = np.asarray(ok4event) == 0
        sv1_ntkv4event[no_vtx] = -1
        sv1_n2t4event[no_vtx] = -1
        sv1_mass4event[no_vtx] = -1000
        sv1_efrc4event[no_vtx] = -1
        sv1_sig34event[no_vtx] = -100
    # -- JF features: dR of the JetFitter axis, -10 when the fit has no mass
    jf_dR = []
    for eventN, (etas, phis, masses) in enumerate(zip(df['jet_jf_deta'], df['jet_jf_dphi'], df['jet_jf_m'])): # loop thru events
        jf_dR_ev = []
        for m in xrange(len(masses)): # loop thru jets
            if (masses[m] > 0):
                jf_dR_ev.append(np.sqrt(etas[m] * etas[m] + phis[m] * phis[m]))
            else:
                jf_dR_ev.append(-10)
        jf_dR.append(jf_dR_ev)
    df['jet_jf_dR'] = jf_dR
    # -- add more default values for jf variables.
    # Mask hoisted before jf_mass is overwritten; the original only behaved
    # equivalently here because -1e3 <= 0 keeps the recomputed mask true.
    for (jf_mass, jf_n2tv, jf_ntrkv, jf_nvtx, jf_nvtx1t, jf_efrc, jf_sig3) in zip(df['jet_jf_m'], df['jet_jf_n2t'], df['jet_jf_ntrkAtVx'], df['jet_jf_nvtx'], df['jet_jf_nvtx1t'], df['jet_jf_efc'], df['jet_jf_sig3d']):
        no_fit = jf_mass <= 0
        jf_n2tv[no_fit] = -1
        jf_ntrkv[no_fit] = -1
        jf_nvtx[no_fit] = -1
        jf_nvtx1t[no_fit] = -1
        jf_mass[no_fit] = -1e3
        jf_efrc[no_fit] = -1
        jf_sig3[no_fit] = -100
    return df
# -----------------------------------------------------------------
if __name__ == '__main__':
    import argparse
    # -- read in arguments: yaml config, model token, and one or more root files
    parser = argparse.ArgumentParser()
    # Look at the yaml file in this directory for an example
    parser.add_argument('variables', type=str, help="path to the yaml file containing lists named\
    'branches','training_vars','ip3d_training_vars','ipmp_training_vars'") # my design choice
    # Give a name to your model for future retrieval
    parser.add_argument('model_id', help="token to identify the model")
    # Path to the root files
    parser.add_argument('input', type=str, nargs="+", help="Path to root files, e.g. /path/to/pattern*.root")
    args = parser.parse_args()
    # exit with main's return value so failures are visible to the shell
    sys.exit(main(args.variables, args.input, args.model_id))
| |
import logging
import time
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404, \
HttpResponseNotModified, HttpResponseServerError
from django.shortcuts import get_object_or_404, get_list_or_404, \
render_to_response
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils import simplejson, timezone
from django.utils.http import http_date
from django.utils.safestring import mark_safe
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.generic.list_detail import object_list
from djblets.auth.util import login_required
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.dates import get_latest_timestamp
from djblets.util.http import set_last_modified, get_modified_since, \
set_etag, etag_if_none_match
from djblets.util.misc import get_object_or_none
from reviewboard.accounts.decorators import check_login_required, \
valid_prefs_required
from reviewboard.accounts.models import ReviewRequestVisit, Profile
from reviewboard.attachments.forms import UploadFileForm, CommentFileForm
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.diffutils import get_file_chunks_in_range
from reviewboard.diffviewer.models import DiffSet
from reviewboard.diffviewer.views import view_diff, view_diff_fragment, \
exception_traceback_string
from reviewboard.extensions.hooks import DashboardHook, \
ReviewRequestDetailHook
from reviewboard.reviews.datagrids import DashboardDataGrid, \
GroupDataGrid, \
ReviewRequestDataGrid, \
SubmitterDataGrid, \
WatchedGroupDataGrid
from reviewboard.reviews.errors import OwnershipError
from reviewboard.reviews.forms import NewReviewRequestForm, \
UploadDiffForm, \
UploadScreenshotForm
from reviewboard.reviews.models import BaseComment, Comment, \
ReviewRequest, \
Review, Group, Screenshot, \
ScreenshotComment
from reviewboard.scmtools.core import PRE_CREATION
from reviewboard.scmtools.errors import SCMError
from reviewboard.site.models import LocalSite
from reviewboard.webapi.encoder import status_to_string
#####
##### Helper functions
#####
def _render_permission_denied(
    request,
    template_name='reviews/review_request_permission_denied.html'):
    """Render the Permission Denied page for a review request.

    Returns an HttpResponse with its status code forced to 403.
    """
    denied = render_to_response(template_name, RequestContext(request))
    denied.status_code = 403
    return denied
def _find_review_request(request, review_request_id, local_site_name):
    """Look up a review request by ID, optionally scoped to a LocalSite.

    When a LocalSite name is given, the lookup uses the site-local ID
    (local_id) rather than the primary key, so each configured LocalSite
    gets its own review request numbering starting at 1.

    Returns (review_request, None) on success, or (None, response) where
    response is a rendered Permission Denied page.
    """
    if local_site_name:
        site = get_object_or_404(LocalSite, name=local_site_name)

        if not site.is_accessible_by(request.user):
            return None, _render_permission_denied(request)

        review_request = get_object_or_404(ReviewRequest,
                                           local_site=site,
                                           local_id=review_request_id)
    else:
        review_request = get_object_or_404(ReviewRequest,
                                           pk=review_request_id)

    if not review_request.is_accessible_by(request.user):
        return None, _render_permission_denied(request)

    return review_request, None
def _make_review_request_context(review_request, extra_context):
    """Build the common template context for review request pages.

    The returned dictionary carries the data shared by every review
    request-related page (the detail page, the diff viewer, and the
    screenshot pages), merged with any caller-supplied extra_context
    entries (which take precedence on key collisions).
    """
    upload_diff_form = None
    scmtool = None

    if review_request.repository:
        upload_diff_form = UploadDiffForm(review_request)
        scmtool = review_request.repository.get_scmtool()

    context = {
        'review_request': review_request,
        'upload_diff_form': upload_diff_form,
        'upload_screenshot_form': UploadScreenshotForm(),
        'file_attachment_form': UploadFileForm(),
        'comment_file_form': CommentFileForm(),
        'scmtool': scmtool,
    }
    context.update(extra_context)

    return context
def _query_for_diff(review_request, user, revision, query_extra=None):
    """Look up a DiffSet for a review request.

    A draft diff belonging to `user` takes precedence when it matches the
    requested revision (or when no specific revision is requested).
    Raises Http404 when no matching diff exists.
    """
    # The revision may arrive as a string (e.g. from the URL); normalize.
    if revision:
        revision = int(revision)

    # Prefer the draft's diff when the draft exists and either matches the
    # requested revision or no specific revision was asked for.
    draft = review_request.get_draft(user)

    if (draft and draft.diffset and
        (revision is None or draft.diffset.revision == revision)):
        return draft.diffset

    q = Q(history=review_request.diffset_history)

    # Narrow to a specific revision, if one was requested.
    if revision is not None:
        q &= Q(revision=revision)

    # Fold in anything else the caller wants.
    if query_extra:
        q &= query_extra

    try:
        return DiffSet.objects.filter(q).latest()
    except DiffSet.DoesNotExist:
        raise Http404
def build_diff_comment_fragments(
    comments, context,
    comment_template_name='reviews/diff_comment_fragment.html',
    error_template_name='diffviewer/diff_fragment_error.html'):
    """Render an HTML fragment for each diff comment.

    Returns a (had_error, comment_entries) tuple, where comment_entries is
    a list of {'comment': ..., 'html': ...} dicts in input order. When
    rendering a fragment raises, an error fragment is substituted for that
    comment and had_error is set to True.
    """
    comment_entries = []
    had_error = False
    siteconfig = SiteConfiguration.objects.get_current()
    for comment in comments:
        try:
            # Render the normal fragment, including the diff chunks this
            # comment spans.
            content = render_to_string(comment_template_name, {
                'comment': comment,
                'chunks': list(get_file_chunks_in_range(context,
                                                        comment.filediff,
                                                        comment.interfilediff,
                                                        comment.first_line,
                                                        comment.num_lines)),
                'domain': Site.objects.get_current().domain,
                'domain_method': siteconfig.get("site_domain_method"),
            })
        except Exception, e:
            # Fall back to an error fragment carrying the traceback.
            content = exception_traceback_string(None, e,
                                                 error_template_name, {
                'comment': comment,
                'file': {
                    'depot_filename': comment.filediff.source_file,
                    'index': None,
                    'filediff': comment.filediff,
                },
                'domain': Site.objects.get_current().domain,
                'domain_method': siteconfig.get("site_domain_method"),
            })
            # It's bad that we failed, and we'll return a 500, but we'll
            # still return content for anything we have. This will prevent any
            # caching.
            had_error = True
        comment_entries.append({
            'comment': comment,
            'html': content,
        })
    return had_error, comment_entries
# Maps ChangeDescription field names to the human-readable titles shown
# in review request change entries (used by review_detail below).
fields_changed_name_map = {
    'summary': 'Summary',
    'description': 'Description',
    'testing_done': 'Testing Done',
    'bugs_closed': 'Bugs Closed',
    'branch': 'Branch',
    'target_groups': 'Reviewers (Groups)',
    'target_people': 'Reviewers (People)',
    'screenshots': 'Screenshots',
    'screenshot_captions': 'Screenshot Captions',
    'files': 'Uploaded Files',
    'file_captions': 'Uploaded File Captions',
    'diff': 'Diff',
}
#####
##### View functions
#####
@login_required
def new_review_request(request,
                       local_site_name=None,
                       template_name='reviews/new_review_request.html'):
    """Display the New Review Request form and handle its submission.

    On POST, creates a review request from either an existing changeset or
    the uploaded diff; otherwise (or on failure) redisplays the form.
    """
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    if request.method == 'POST':
        form = NewReviewRequestForm(request.user, local_site,
                                    request.POST, request.FILES)

        if form.is_valid():
            try:
                review_request = form.create(
                    user=request.user,
                    diff_file=request.FILES.get('diff_path'),
                    parent_diff_file=request.FILES.get('parent_diff_path'),
                    local_site=local_site)
                return HttpResponseRedirect(review_request.get_absolute_url())
            except (OwnershipError, SCMError, ValueError):
                # Creation failed; fall through and redisplay the form.
                pass
    else:
        form = NewReviewRequestForm(request.user, local_site)

    return render_to_response(template_name, RequestContext(request, {
        'form': form,
        'fields': simplejson.dumps(form.field_mapping),
    }))
@check_login_required
def review_detail(request,
                  review_request_id,
                  local_site_name=None,
                  template_name="reviews/review_detail.html"):
    """
    Main view for review requests. This covers the review request information
    and all the reviews on it.

    Builds a chronological list of entries (public reviews plus public
    change descriptions), tracks the user's visit, computes issue counters,
    and short-circuits with 304 Not Modified via an ETag when nothing
    relevant has changed.
    """
    # If there's a local_site passed in the URL, we want to look up the review
    # request based on the local_id instead of the pk. This allows each
    # local_site configured to have its own review request ID namespace
    # starting from 1.
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)
    if not review_request:
        return response
    reviews = review_request.get_public_reviews()
    review = review_request.get_pending_review(request.user)
    review_timestamp = 0
    last_visited = 0
    starred = False
    if request.user.is_authenticated():
        # If the review request is public and pending review and if the user
        # is logged in, mark that they've visited this review request.
        if review_request.public and review_request.status == "P":
            visited, visited_is_new = ReviewRequestVisit.objects.get_or_create(
                user=request.user, review_request=review_request)
            last_visited = visited.timestamp.replace(tzinfo=utc)
            visited.timestamp = timezone.now()
            visited.save()
        profile, profile_is_new = \
            Profile.objects.get_or_create(user=request.user)
        starred = review_request in profile.starred_review_requests.all()
        # Unlike review above, this covers replies as well.
        try:
            last_draft_review = Review.objects.filter(
                review_request=review_request,
                user=request.user,
                public=False).latest()
            review_timestamp = last_draft_review.timestamp
        except Review.DoesNotExist:
            pass
    draft = review_request.get_draft(request.user)
    # Find out if we can bail early. Generate an ETag for this.
    last_activity_time, updated_object = review_request.get_last_activity()
    if draft:
        draft_timestamp = draft.last_updated
    else:
        draft_timestamp = ""
    # The ETag covers everything that affects the rendered page for this
    # particular user.
    etag = "%s:%s:%s:%s:%s:%s" % (request.user, last_activity_time,
                                  draft_timestamp, review_timestamp,
                                  int(starred),
                                  settings.AJAX_SERIAL)
    if etag_if_none_match(request, etag):
        return HttpResponseNotModified()
    changedescs = review_request.changedescs.filter(public=True)
    latest_changedesc = None
    try:
        latest_changedesc = changedescs.latest()
        latest_timestamp = latest_changedesc.timestamp
    except ChangeDescription.DoesNotExist:
        latest_timestamp = None
    entries = []
    # One entry per public review; collapse reviews that predate the
    # newest change description unless a newer reply re-expands them.
    for temp_review in reviews:
        temp_review.ordered_comments = \
            temp_review.comments.order_by('filediff', 'first_line')
        state = ''
        # Mark as collapsed if the review is older than the latest change
        if latest_timestamp and temp_review.timestamp < latest_timestamp:
            state = 'collapsed'
        try:
            latest_reply = \
                temp_review.public_replies().latest('timestamp').timestamp
        except Review.DoesNotExist:
            latest_reply = None
        # Mark as expanded if there is a reply newer than last_visited
        if latest_reply and last_visited and last_visited < latest_reply:
            state = ''
        entries.append({
            'review': temp_review,
            'timestamp': temp_review.timestamp,
            'class': state,
        })
    # One entry per public change description, with each changed field
    # translated into display-ready info.
    for changedesc in changedescs:
        fields_changed = []
        for name, info in changedesc.fields_changed.items():
            multiline = False
            if 'added' in info or 'removed' in info:
                change_type = 'add_remove'
                # We don't hard-code URLs in the bug info, since the
                # tracker may move, but we can do it here.
                if (name == "bugs_closed" and
                    review_request.repository and
                    review_request.repository.bug_tracker):
                    bug_url = review_request.repository.bug_tracker
                    for field in info:
                        for i, buginfo in enumerate(info[field]):
                            try:
                                full_bug_url = bug_url % buginfo[0]
                                info[field][i] = (buginfo[0], full_bug_url)
                            except TypeError:
                                logging.warning("Invalid bugtracker url format")
            elif 'old' in info or 'new' in info:
                change_type = 'changed'
                multiline = (name == "description" or name == "testing_done")
                # Branch text is allowed to have entities, so mark it safe.
                if name == "branch":
                    if 'old' in info:
                        info['old'][0] = mark_safe(info['old'][0])
                    if 'new' in info:
                        info['new'][0] = mark_safe(info['new'][0])
                # Make status human readable.
                if name == 'status':
                    if 'old' in info:
                        info['old'][0] = status_to_string(info['old'][0])
                    if 'new' in info:
                        info['new'][0] = status_to_string(info['new'][0])
            elif name == "screenshot_captions":
                change_type = 'screenshot_captions'
            elif name == "file_captions":
                change_type = 'file_captions'
            else:
                # No clue what this is. Bail.
                continue
            fields_changed.append({
                'title': fields_changed_name_map.get(name, name),
                'multiline': multiline,
                'info': info,
                'type': change_type,
            })
        # Expand the latest review change
        state = ''
        # Mark as collapsed if the change is older than a newer change
        if latest_timestamp and changedesc.timestamp < latest_timestamp:
            state = 'collapsed'
        entries.append({
            'changeinfo': fields_changed,
            'changedesc': changedesc,
            'timestamp': changedesc.timestamp,
            'class': state,
        })
    # Interleave reviews and change descriptions chronologically.
    entries.sort(key=lambda item: item['timestamp'])
    close_description = ''
    if latest_changedesc and 'status' in latest_changedesc.fields_changed:
        status = latest_changedesc.fields_changed['status']['new'][0]
        if status in (ReviewRequest.DISCARDED, ReviewRequest.SUBMITTED):
            close_description = latest_changedesc.text
    # Tally issue counts across every comment with an opened issue.
    issues = {
        'total': 0,
        'open': 0,
        'resolved': 0,
        'dropped': 0
    }
    for entry in entries:
        if 'review' in entry:
            for comment in entry['review'].get_all_comments(issue_opened=True):
                issues['total'] += 1
                issues[BaseComment.issue_status_to_string(
                    comment.issue_status)] += 1
    response = render_to_response(
        template_name,
        RequestContext(request, _make_review_request_context(review_request, {
            'draft': draft,
            'detail_hooks': ReviewRequestDetailHook.hooks,
            'review_request_details': draft or review_request,
            'entries': entries,
            'last_activity_time': last_activity_time,
            'review': review,
            'request': request,
            'latest_changedesc': latest_changedesc,
            'close_description': close_description,
            'PRE_CREATION': PRE_CREATION,
            'issues': issues,
        })))
    set_etag(response, etag)
    return response
@login_required
@cache_control(no_cache=True, no_store=True, max_age=0, must_revalidate=True)
def review_draft_inline_form(request,
                             review_request_id,
                             template_name,
                             local_site_name=None):
    """Render the inline form for the user's pending (draft) review."""
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    pending_review = review_request.get_pending_review(request.user)

    # A brand new review has no review object yet; only sort comments when
    # one exists.
    if pending_review:
        pending_review.ordered_comments = \
            pending_review.comments.order_by('filediff', 'first_line')

    return render_to_response(template_name, RequestContext(request, {
        'review_request': review_request,
        'review': pending_review,
        'PRE_CREATION': PRE_CREATION,
    }))
@check_login_required
def all_review_requests(request,
                        local_site_name=None,
                        template_name='reviews/datagrid.html'):
    """Display a datagrid of every review request visible to the user."""
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    queryset = ReviewRequest.objects.public(request.user,
                                            status=None,
                                            local_site=local_site,
                                            with_counts=True)
    grid = ReviewRequestDataGrid(request, queryset,
                                 _("All review requests"),
                                 local_site=local_site)
    return grid.render_to_response(template_name)
@check_login_required
def submitter_list(request,
                   local_site_name=None,
                   template_name='reviews/datagrid.html'):
    """Display a datagrid of all users."""
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    return SubmitterDataGrid(
        request, local_site=local_site).render_to_response(template_name)
@check_login_required
def group_list(request,
               local_site_name=None,
               template_name='reviews/datagrid.html'):
    """Display a datagrid of all review groups."""
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    return GroupDataGrid(
        request, local_site=local_site).render_to_response(template_name)
@login_required
@valid_prefs_required
def dashboard(request,
              template_name='reviews/dashboard.html',
              local_site_name=None):
    """Render the dashboard of review requests, organized by list.

    The 'view' GET parameter selects the list; valid values are
    'outgoing', 'to-me', 'to-group', 'starred', 'watched-groups',
    'incoming', and 'mine'.
    """
    view = request.GET.get('view', None)
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    if view == "watched-groups":
        # Watched groups are special: this grid lists groups, not
        # review requests.
        grid = WatchedGroupDataGrid(request, local_site=local_site)
    else:
        grid = DashboardDataGrid(request, local_site=local_site)

    return grid.render_to_response(template_name, extra_context={
        'sidebar_hooks': DashboardHook.hooks,
    })
@check_login_required
def group(request,
          name,
          template_name='reviews/datagrid.html',
          local_site_name=None):
    """Display the review requests belonging to a particular group."""
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    # The group must exist and be visible to the requesting user.
    review_group = get_object_or_404(Group, name=name, local_site=local_site)

    if not review_group.is_accessible_by(request.user):
        return _render_permission_denied(
            request, 'reviews/group_permission_denied.html')

    grid = ReviewRequestDataGrid(
        request,
        ReviewRequest.objects.to_group(name, local_site, status=None,
                                       with_counts=True),
        _("Review requests for %s") % name)
    return grid.render_to_response(template_name)
@check_login_required
def group_members(request,
                  name,
                  template_name='reviews/datagrid.html',
                  local_site_name=None):
    """Display the active users registered for a particular group."""
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    # The group must exist and be visible to the requesting user.
    review_group = get_object_or_404(Group,
                                     name=name,
                                     local_site=local_site)

    if not review_group.is_accessible_by(request.user):
        return _render_permission_denied(
            request, 'reviews/group_permission_denied.html')

    grid = SubmitterDataGrid(request,
                             review_group.users.filter(is_active=True),
                             _("Members of group %s") % name)
    return grid.render_to_response(template_name)
@check_login_required
def submitter(request,
              username,
              template_name='reviews/user_page.html',
              local_site_name=None):
    """Display the review requests owned by a particular user."""
    local_site = None

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    # Resolve the user, scoped to the LocalSite when one is active.
    if local_site:
        try:
            user = local_site.users.get(username=username)
        except User.DoesNotExist:
            raise Http404
    else:
        user = get_object_or_404(User, username=username)

    grid = ReviewRequestDataGrid(
        request,
        ReviewRequest.objects.from_user(username, status=None,
                                        with_counts=True,
                                        local_site=local_site),
        _("%s's review requests") % username,
        local_site=local_site)
    return grid.render_to_response(template_name, extra_context={
        'show_profile': user.is_profile_visible(request.user),
        'viewing_user': user,
    })
@check_login_required
def diff(request,
         review_request_id,
         revision=None,
         interdiff_revision=None,
         local_site_name=None,
         template_name='diffviewer/view_diff.html'):
    """Wrap diffviewer.views.view_diff for a review request's diffs.

    Handles querying for diffs owned by the review request, taking
    interdiffs into account, and providing the user's current pending
    review of the diff if one exists.
    """
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    diffset = _query_for_diff(review_request, request.user, revision)

    interdiffset = None

    # When a distinct interdiff revision is requested, look up the
    # matching diffset as well.
    if interdiff_revision and interdiff_revision != revision:
        interdiffset = _query_for_diff(review_request, request.user,
                                       interdiff_revision)

    # The user's existing pending review of this diff, if any.
    review = review_request.get_pending_review(request.user)
    draft = review_request.get_draft(request.user)

    has_draft_diff = draft and draft.diffset
    is_draft_diff = has_draft_diff and draft.diffset == diffset
    is_draft_interdiff = (has_draft_diff and interdiffset and
                          draft.diffset == interdiffset)

    num_diffs = review_request.diffset_history.diffsets.count()

    if has_draft_diff:
        num_diffs += 1

    last_activity_time, updated_object = review_request.get_last_activity()

    return view_diff(
        request, diffset, interdiffset, template_name=template_name,
        extra_context=_make_review_request_context(review_request, {
            'review': review,
            'review_request_details': draft or review_request,
            'draft': draft,
            'is_draft_diff': is_draft_diff,
            'is_draft_interdiff': is_draft_interdiff,
            'num_diffs': num_diffs,
            'last_activity_time': last_activity_time,
            'specific_diff_requested': (revision is not None or
                                        interdiff_revision is not None),
            'base_url': review_request.get_absolute_url(),
        }))
@check_login_required
def raw_diff(request,
             review_request_id,
             revision=None,
             local_site_name=None):
    """Serve the raw patch for all filediffs in a review request's diffset."""
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    diffset = _query_for_diff(review_request, request.user, revision)
    scmtool = review_request.repository.get_scmtool()
    raw = scmtool.get_parser('').raw_diff(diffset)

    response = HttpResponse(raw, mimetype='text/x-patch')

    if diffset.name == 'diff':
        # Give the default-named diff a more useful, bug-based filename.
        filename = "bug%s.patch" % review_request.bugs_closed.replace(',', '_')
    else:
        filename = diffset.name

    response['Content-Disposition'] = 'inline; filename=%s' % filename
    set_last_modified(response, diffset.timestamp)

    return response
@check_login_required
def comment_diff_fragments(
    request,
    review_request_id,
    comment_ids,
    template_name='reviews/load_diff_comment_fragments.js',
    comment_template_name='reviews/diff_comment_fragment.html',
    error_template_name='diffviewer/diff_fragment_error.html',
    local_site_name=None):
    """
    Returns the fragment representing the parts of a diff referenced by the
    specified list of comment IDs. This is used to allow batch lazy-loading
    of these diff fragments based on filediffs, since they may not be cached
    and take time to generate.

    `comment_ids` is a comma-separated list of Comment primary keys.
    Responds 304 when the client's cache is current, and 500 (uncached)
    when any fragment failed to render.
    """
    # While we don't actually need the review request, we still want to do
    # this lookup in order to get the permissions checking.
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    comments = get_list_or_404(Comment, pk__in=comment_ids.split(","))
    latest_timestamp = get_latest_timestamp([comment.timestamp
                                             for comment in comments])

    if get_modified_since(request, latest_timestamp):
        return HttpResponseNotModified()

    context = RequestContext(request, {
        'comment_entries': [],
        'container_prefix': request.GET.get('container_prefix'),
        'queue_name': request.GET.get('queue'),
    })

    had_error, context['comment_entries'] = \
        build_diff_comment_fragments(comments,
                                     context,
                                     comment_template_name,
                                     error_template_name)

    page_content = render_to_string(template_name, context)

    # A rendering failure returns a 500 so the result is never cached.
    if had_error:
        return HttpResponseServerError(page_content)

    response = HttpResponse(page_content)

    # Bug fix: this previously passed `comment.timestamp`, silently relying
    # on the loop variable leaked from the list comprehension above (an
    # arbitrary comment's timestamp, and a NameError on Python 3). Use the
    # newest comment timestamp, matching the get_modified_since() check.
    set_last_modified(response, latest_timestamp)
    response['Expires'] = http_date(time.time() + 60 * 60 * 24 * 365) # 1 year

    return response
@check_login_required
def diff_fragment(request,
                  review_request_id,
                  revision,
                  filediff_id,
                  interdiff_revision=None,
                  chunkindex=None,
                  template_name='diffviewer/diff_file_fragment.html',
                  local_site_name=None):
    """Wrap diffviewer.views.view_diff_fragment for a review request.

    Renders one fragment of a diff or interdiff owned by the given review
    request; the fragment is identified by the chunk index within the diff.
    """
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    # The draft lookup's return value is discarded here (as in the
    # original code); only the call itself is kept.
    review_request.get_draft(request.user)

    interdiffset_id = None

    if interdiff_revision is not None:
        interdiffset = _query_for_diff(review_request, request.user,
                                       interdiff_revision)
        interdiffset_id = interdiffset.id

    diffset = _query_for_diff(review_request, request.user, revision)

    return view_diff_fragment(request, diffset.id, filediff_id,
                              review_request.get_absolute_url(),
                              interdiffset_id, chunkindex, template_name)
@check_login_required
def preview_review_request_email(
    request,
    review_request_id,
    format,
    text_template_name='notifications/review_request_email.txt',
    html_template_name='notifications/review_request_email.html',
    changedesc_id=None,
    local_site_name=None):
    """Preview the e-mail for a new review request or an update.

    This is mainly used for debugging. `format` must be 'text' or 'html';
    anything else raises Http404.
    """
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    extra_context = {}

    # When previewing an update, include the change description's details.
    if changedesc_id:
        changedesc = get_object_or_404(ChangeDescription, pk=changedesc_id)
        extra_context['change_text'] = changedesc.text
        extra_context['changes'] = changedesc.fields_changed

    siteconfig = SiteConfiguration.objects.get_current()

    if format == 'text':
        template_name, mimetype = text_template_name, 'text/plain'
    elif format == 'html':
        template_name, mimetype = html_template_name, 'text/html'
    else:
        raise Http404

    context = dict({
        'review_request': review_request,
        'user': request.user,
        'domain': Site.objects.get_current().domain,
        'domain_method': siteconfig.get("site_domain_method"),
    }, **extra_context)

    return HttpResponse(
        render_to_string(template_name, RequestContext(request, context)),
        mimetype=mimetype)
@check_login_required
def preview_review_email(request, review_request_id, review_id, format,
                         text_template_name='notifications/review_email.txt',
                         html_template_name='notifications/review_email.html',
                         extra_context=None,
                         local_site_name=None):
    """
    Previews the e-mail message that would be sent for a review of a
    review request.

    This is mainly used for debugging.

    `format` must be 'text' or 'html'; anything else raises Http404.
    `extra_context` optionally supplies additional template variables.
    """
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    review = get_object_or_404(Review, pk=review_id,
                               review_request=review_request)
    siteconfig = SiteConfiguration.objects.get_current()

    review.ordered_comments = \
        review.comments.order_by('filediff', 'first_line')

    if format == 'text':
        template_name = text_template_name
        mimetype = 'text/plain'
    elif format == 'html':
        template_name = html_template_name
        mimetype = 'text/html'
    else:
        raise Http404

    context = {
        'review_request': review_request,
        'review': review,
        'user': request.user,
        'domain': Site.objects.get_current().domain,
        'domain_method': siteconfig.get("site_domain_method"),
    }

    # Bug fix: `extra_context` used to default to a mutable {} shared
    # across calls; default to None and only merge when provided.
    if extra_context:
        context.update(extra_context)

    has_error, context['comment_entries'] = \
        build_diff_comment_fragments(
            review.ordered_comments, context,
            "notifications/email_diff_comment_fragment.html")

    return HttpResponse(
        render_to_string(template_name, RequestContext(request, context)),
        mimetype=mimetype)
@check_login_required
def preview_reply_email(request, review_request_id, review_id, reply_id,
                        format,
                        text_template_name='notifications/reply_email.txt',
                        html_template_name='notifications/reply_email.html',
                        local_site_name=None):
    """Preview the e-mail for a reply to a review of a review request.

    This is mainly used for debugging. `format` must be 'text' or 'html';
    anything else raises Http404.
    """
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    review = get_object_or_404(Review, pk=review_id,
                               review_request=review_request)
    reply = get_object_or_404(Review, pk=reply_id, base_reply_to=review)
    siteconfig = SiteConfiguration.objects.get_current()

    reply.ordered_comments = \
        reply.comments.order_by('filediff', 'first_line')

    if format == 'text':
        template_name, mimetype = text_template_name, 'text/plain'
    elif format == 'html':
        template_name, mimetype = html_template_name, 'text/html'
    else:
        raise Http404

    context = {
        'review_request': review_request,
        'review': review,
        'reply': reply,
        'user': request.user,
        'domain': Site.objects.get_current().domain,
        'domain_method': siteconfig.get("site_domain_method"),
    }

    has_error, context['comment_entries'] = \
        build_diff_comment_fragments(
            reply.ordered_comments, context,
            "notifications/email_diff_comment_fragment.html")

    return HttpResponse(
        render_to_string(template_name, RequestContext(request, context)),
        mimetype=mimetype)
@check_login_required
def view_screenshot(request,
                    review_request_id,
                    screenshot_id,
                    template_name='reviews/screenshot_detail.html',
                    local_site_name=None):
    """
    Displays a screenshot, along with any comments that were made on it.
    """
    review_request, response = \
        _find_review_request(request, review_request_id, local_site_name)

    if not review_request:
        return response

    screenshot = get_object_or_404(Screenshot, pk=screenshot_id)
    review = review_request.get_pending_review(request.user)
    draft = review_request.get_draft(request.user)

    # Cleanup: a diffset-history Q() query used to be built here but was
    # never used; it has been removed. Likewise the old
    # try/except ScreenshotComment.DoesNotExist around filter() was dead —
    # filter() is lazy and never raises DoesNotExist.
    comments = ScreenshotComment.objects.filter(screenshot=screenshot)

    return render_to_response(
        template_name,
        RequestContext(request, _make_review_request_context(review_request, {
            'draft': draft,
            'review_request_details': draft or review_request,
            'review': review,
            'details': draft or review_request,
            'screenshot': screenshot,
            'request': request,
            'comments': comments,
        })))
@check_login_required
def search(request,
           template_name='reviews/search.html',
           local_site_name=None):
    """
    Searches review requests on Review Board based on a query string.

    A purely numeric query redirects straight to that review request if it
    exists; otherwise the query is run through the PyLucene index and the
    matching review requests are paginated 10 per page.
    """
    query = request.GET.get('q', '')
    siteconfig = SiteConfiguration.objects.get_current()
    if not siteconfig.get("search_enable"):
        # FIXME: show something useful
        raise Http404
    if not query:
        # FIXME: I'm not super thrilled with this
        return HttpResponseRedirect(reverse("root"))
    # A purely numeric query jumps straight to that review request.
    if query.isdigit():
        query_review_request = get_object_or_none(ReviewRequest, pk=query)
        if query_review_request:
            return HttpResponseRedirect(query_review_request.get_absolute_url())
    import lucene
    # PyLucene's API differs across major versions; branch on 2.x vs 3.x
    # (2.9 is treated as 3.x-compatible).
    lv = [int(x) for x in lucene.VERSION.split('.')]
    lucene_is_2x = lv[0] == 2 and lv[1] < 9
    lucene_is_3x = lv[0] == 3 or (lv[0] == 2 and lv[1] == 9)
    # We may have already initialized lucene
    try:
        lucene.initVM(lucene.CLASSPATH)
    except ValueError:
        pass
    index_file = siteconfig.get("search_index_file")
    if lucene_is_2x:
        store = lucene.FSDirectory.getDirectory(index_file, False)
    elif lucene_is_3x:
        store = lucene.FSDirectory.open(lucene.File(index_file))
    else:
        assert False
    try:
        searcher = lucene.IndexSearcher(store)
    except lucene.JavaError, e:
        # FIXME: show a useful error
        raise e
    if lucene_is_2x:
        parser = lucene.QueryParser('text', lucene.StandardAnalyzer())
        result_ids = [int(lucene.Hit.cast_(hit).getDocument().get('id')) \
                      for hit in searcher.search(parser.parse(query))]
    elif lucene_is_3x:
        parser = lucene.QueryParser(lucene.Version.LUCENE_CURRENT, 'text',
            lucene.StandardAnalyzer(lucene.Version.LUCENE_CURRENT))
        # 3.x search returns scored docs; results are capped at 100.
        result_ids = [searcher.doc(hit.doc).get('id') \
                      for hit in searcher.search(parser.parse(query), 100).scoreDocs]
    searcher.close()
    results = ReviewRequest.objects.filter(id__in=result_ids,
                                           local_site__name=local_site_name)
    return object_list(request=request,
                       queryset=results,
                       paginate_by=10,
                       template_name=template_name,
                       extra_context={'query': query,
                                      'extra_query': 'q=%s' % query,
                                      })
@check_login_required
def user_infobox(request, username,
                 template_name='accounts/user_infobox.html',
                 local_site_name=None):
    """Displays a user info popup.

    This is meant to be embedded in other pages, rather than being
    a standalone page.
    """
    user = get_object_or_404(User, username=username)

    if local_site_name:
        local_site = get_object_or_404(LocalSite, name=local_site_name)

        if not local_site.is_accessible_by(request.user):
            return _render_permission_denied(request)

    show_profile = user.is_profile_visible(request.user)

    # Build an ETag from everything that affects the rendered popup.
    etag_fields = [user.first_name.encode('ascii', 'replace'),
                   user.last_name.encode('ascii', 'replace'),
                   user.email.encode('ascii', 'replace'),
                   str(user.last_login), str(settings.AJAX_SERIAL),
                   str(show_profile)]
    etag = ':'.join(etag_fields)

    if etag_if_none_match(request, etag):
        return HttpResponseNotModified()

    response = render_to_response(template_name, RequestContext(request, {
        'show_profile': show_profile,
        'requested_user': user,
    }))
    set_etag(response, etag)

    return response
| |
# basic_source.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,E0611,R0201,R0204,W0212,W0232,W0612
# PyPI imports
from numpy import array
import pytest
# Putil imports
from putil.plot import BasicSource as FUT
from putil.test import AE, AI, APROP, AROPROP
###
# Global variables
###
# Reference independent-variable vector shared by the test cases below.
RIVAR = array([1, 2, 3])
# Reference dependent-variable vector shared by the test cases below.
RDVAR = array([10, 20, 30])
###
# Test classes
###
class TestBasicSource(object):
    """
    Tests for BasicSource.

    Helper names come from putil.test (presumably: AE = assert exception,
    AI = assert invalid argument, APROP = assert exception on property
    assignment, AROPROP = assert property is read-only) -- TODO confirm
    against putil.test's documentation.
    """
    def test_str(self):
        """ Test that str behaves correctly """
        # Full set
        obj = str(FUT(RIVAR, RDVAR, indep_min=-10, indep_max=20.0))
        ref = (
            'Independent variable minimum: -10\n'
            'Independent variable maximum: 20.0\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref
        # indep_min not set
        obj = str(FUT(RIVAR, RDVAR, indep_max=20.0))
        ref = (
            'Independent variable minimum: -inf\n'
            'Independent variable maximum: 20.0\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref
        # indep_max not set
        obj = str(FUT(RIVAR, RDVAR, indep_min=-10))
        ref = (
            'Independent variable minimum: -10\n'
            'Independent variable maximum: +inf\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref
        # indep_min and indep_max not set
        obj = str(FUT(RIVAR, RDVAR))
        ref = (
            'Independent variable minimum: -inf\n'
            'Independent variable maximum: +inf\n'
            'Independent variable: [ 1.0, 2.0, 3.0 ]\n'
            'Dependent variable: [ 10.0, 20.0, 30.0 ]'
        )
        assert obj == ref
    def test_complete(self):
        """ Test _complete property behavior """
        # Clearing the independent variable must mark the source incomplete.
        obj = FUT(RIVAR, RDVAR, indep_min=0, indep_max=50)
        obj._indep_var = None
        assert not obj._complete
        obj = FUT(RIVAR, RDVAR, indep_min=0, indep_max=50)
        assert obj._complete
    @pytest.mark.parametrize('indep_min', [1, 2.0])
    def test_indep_min(self, indep_min):
        """ Tests indep_min property behavior """
        # __init__ path
        FUT(RIVAR, RDVAR, indep_min=indep_min)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        obj.indep_min = indep_min
        assert obj.indep_min == indep_min
    @pytest.mark.basic_source
    @pytest.mark.parametrize('indep_min', ['a', False])
    def test_indep_min_exceptions(self, indep_min):
        """ Tests indep_min property exceptions """
        # __init__ path
        AI(FUT, 'indep_min', RIVAR, RDVAR, indep_min=indep_min)
        obj = FUT(RIVAR, RDVAR)
        msg = 'Argument `indep_min` is not valid'
        APROP(obj, 'indep_min', indep_min, RuntimeError, msg)
    @pytest.mark.parametrize('indep_max', [1, 2.0])
    def test_indep_max(self, indep_max):
        """ Tests indep_max property behavior """
        # __init__ path
        FUT(RIVAR, RDVAR, indep_max=indep_max)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        obj.indep_max = indep_max
        assert obj.indep_max == indep_max
    @pytest.mark.basic_source
    @pytest.mark.parametrize('indep_max', ['a', False])
    def test_indep_max_exceptions(self, indep_max):
        """ Tests indep_max property exceptions """
        # __init__ path
        AI(FUT, 'indep_max', RIVAR, RDVAR, indep_max=indep_max)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        msg = 'Argument `indep_max` is not valid'
        APROP(obj, 'indep_max', indep_max, RuntimeError, msg)
        #with pytest.raises(RuntimeError) as excinfo:
        #    obj.indep_max = indep_max
        #assert GET_EXMSG(excinfo) == 'Argument `indep_max` is not valid'
    @pytest.mark.basic_source
    def test_indep_min_greater_than_indep_max_exceptions(self):
        """
        Test behavior when indep_min and indep_max are incongruous
        """
        # Assign indep_min first
        obj = FUT(RIVAR, RDVAR, indep_min=0.5)
        exmsg = 'Argument `indep_min` is greater than argument `indep_max`'
        APROP(obj, 'indep_max', 0, ValueError, exmsg)
        #with pytest.raises(ValueError) as excinfo:
        #    obj.indep_max = 0
        #assert GET_EXMSG(excinfo) == exmsg
        # Assign indep_max first
        obj = FUT(RIVAR, RDVAR)
        obj.indep_max = 40
        APROP(obj, 'indep_min', 50, ValueError, exmsg)
        #with pytest.raises(ValueError) as excinfo:
        #    obj.indep_min = 50
        #assert GET_EXMSG(excinfo) == exmsg
    def test_indep_var(self):
        """ Tests indep_var property behavior """
        # __init__ path
        indep_var1 = RIVAR
        indep_var2 = array([4.0, 5.0, 6.0])
        assert (FUT(indep_var1, RDVAR).indep_var == indep_var1).all()
        assert (FUT(indep_var2, RDVAR).indep_var == indep_var2).all()
        # Managed attribute path
        obj = FUT(indep_var=indep_var1, dep_var=RDVAR)
        obj.indep_var = indep_var2
        assert (obj.indep_var == indep_var2).all()
    @pytest.mark.basic_source
    @pytest.mark.parametrize(
        'indep_var', [None, 'a', array([1.0, 2.0, 0.0, 3.0]), []]
    )
    def test_indep_var_exceptions(self, indep_var):
        """ Tests indep_var property exceptions """
        # __init__ path
        AI(FUT, 'indep_var', indep_var, RDVAR)
        # Assign indep_min via attribute
        msg = (
            'Argument `indep_var` is empty after '
            '`indep_min`/`indep_max` range bounding'
        )
        obj = FUT(RIVAR, RDVAR)
        APROP(obj, 'indep_min', 45, ValueError, msg)
        # Assign indep_max via attribute
        obj = FUT(RIVAR, RDVAR)
        APROP(obj, 'indep_max', 0, ValueError, msg)
        # Assign both indep_min and indep_max via __init__ path
        AE(FUT, ValueError, msg, RIVAR, RDVAR, indep_min=4, indep_max=10)
        # Managed attribute path
        obj = FUT(RIVAR, RDVAR)
        # Wrong type
        assert (obj.indep_var == RIVAR).all()
        msg = 'Argument `indep_var` is not valid'
        APROP(obj, 'indep_var', indep_var, RuntimeError, msg)
        #with pytest.raises(RuntimeError) as excinfo:
        #    obj.indep_var = indep_var
        #assert GET_EXMSG(excinfo) == 'Argument `indep_var` is not valid'
    def test_dep_var(self):
        """ Tests dep_var property behavior """
        # __init__ path
        # Valid values, these should not raise any exception
        indep_var = array([10, 20, 30])
        dep_var1 = array([1, 2, 3])
        dep_var2 = array([4.0, 5.0, 6.0])
        assert (FUT(indep_var, dep_var1).dep_var == dep_var1).all()
        assert (FUT(indep_var, dep_var2).dep_var == dep_var2).all()
        # Managed attribute path
        obj = FUT(indep_var=indep_var, dep_var=dep_var1)
        obj.dep_var = dep_var1
        assert (obj.dep_var == dep_var1).all()
        obj.dep_var = dep_var2
        assert (obj.dep_var == dep_var2).all()
    @pytest.mark.basic_source
    @pytest.mark.parametrize('dep_var', [None, 'a', []])
    def test_dep_var_exceptions(self, dep_var):
        """ Tests dep_var property exceptions """
        # __init__ path
        msg = 'Argument `dep_var` is not valid'
        AI(FUT, 'dep_var', RIVAR, dep_var)
        # Managed attribute path
        obj = FUT(RIVAR, array([1, 2, 3]))
        APROP(obj, 'dep_var', dep_var, RuntimeError, msg)
        #with pytest.raises(RuntimeError) as excinfo:
        #    obj.dep_var = dep_var
        #assert GET_EXMSG(excinfo) == msg
    @pytest.mark.basic_source
    def test_indep_dep_var_not_same_number_of_elements_exceptions(self):
        """ Tests indep_var and dep_var vector congruency """
        msg = (
            'Arguments `indep_var` and `dep_var` '
            'must have the same number of elements'
        )
        # Both set at object creation
        AE(FUT, ValueError, msg, RDVAR, array([1, 2, 3, 4, 5, 6]), 30, 50)
        AE(FUT, ValueError, msg, RDVAR, array([1, 2]), 30, 50)
        # indep_var set first
        obj = FUT(
            indep_var=array([10, 20, 30, 40, 50, 60]),
            dep_var=array([1, 2, 3, 4, 5, 6]),
            indep_min=30,
            indep_max=50)
        APROP(obj, 'dep_var', array([100, 200, 300]), ValueError, msg)
        # dep_var set first
        obj = FUT(RDVAR, array([100, 200, 300]), indep_min=30, indep_max=50)
        APROP(obj, 'dep_var', array([10, 20, 30, 40, 50, 60]), ValueError, msg)
    @pytest.mark.basic_source
    @pytest.mark.parametrize(
        'prop', ['indep_min', 'indep_max', 'indep_var', 'dep_var']
    )
    def test_cannot_delete_attributes_exceptions(self, prop):
        """
        Test that del method raises an exception on all class attributes
        """
        AROPROP(FUT(RDVAR, array([100, 200, 300])), prop)
| |
"""
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_8433n_maxout2048_extradense.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
# Output paths mirror the analysis filename, gzipped numpy arrays.
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
# Two input representations per image: a plain downsampled crop and the
# same crop rotated by 45 degrees.
ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
# Ground-truth solutions for the held-out validation slice.
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
# NOTE(review): this architecture must match the one used to produce
# ANALYSIS_PATH exactly, otherwise set_param_values() below will not fit.
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=8, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=4, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
# One shared variable per input representation; refilled per chunk below.
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
    l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
    l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# Deterministic forward pass (dropout disabled) for batch `idx` of the chunk.
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
# 3 zooms x 10 rotations x 2 (plain/flipped) = 60 transforms per image.
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
    for angle in np.linspace(0, 360, 10, endpoint=False):
        augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
        augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print "  %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
    print
    print "VALIDATION SET"
    print "Compute predictions"
    predictions_list = []
    start_time = time.time()
    for e, (chunk_data, chunk_length) in enumerate(valid_gen):
        print "Chunk %d" % (e + 1)
        xs_chunk = chunk_data
        # need to transpose the chunks to move the 'channels' dimension up
        xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
        print "  load data onto GPU"
        for x_shared, x_chunk in zip(xs_shared, xs_chunk):
            x_shared.set_value(x_chunk)
        num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
        predictions_chunk_list = []
        for b in xrange(num_batches_chunk):
            if b % 1000 == 0:
                print "  batch %d/%d" % (b + 1, num_batches_chunk)
            predictions = compute_output(b)
            predictions_chunk_list.append(predictions)
        predictions_chunk = np.vstack(predictions_chunk_list)
        predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
        print "  compute average over transforms"
        # Average the 37 outputs over all augmentation transforms per image.
        predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
        predictions_list.append(predictions_chunk_avg)
        time_since_start = time.time() - start_time
        print "  %s since start" % load_data.hms(time_since_start)
    all_predictions = np.vstack(predictions_list)
    print "Write predictions to %s" % target_path_valid
    load_data.save_gz(target_path_valid, all_predictions)
    print "Evaluate"
    # Compare augmented-averaged RMSE against the last recorded training RMSE.
    rmse_valid = analysis['losses_valid'][-1]
    rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
    print "  MSE (last iteration):\t%.6f" % rmse_valid
    print "  MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
    print
    print "TEST SET"
    print "Compute predictions"
    predictions_list = []
    start_time = time.time()
    for e, (chunk_data, chunk_length) in enumerate(test_gen):
        print "Chunk %d" % (e + 1)
        xs_chunk = chunk_data
        # need to transpose the chunks to move the 'channels' dimension up
        xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
        print "  load data onto GPU"
        for x_shared, x_chunk in zip(xs_shared, xs_chunk):
            x_shared.set_value(x_chunk)
        num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
        predictions_chunk_list = []
        for b in xrange(num_batches_chunk):
            if b % 1000 == 0:
                print "  batch %d/%d" % (b + 1, num_batches_chunk)
            predictions = compute_output(b)
            predictions_chunk_list.append(predictions)
        predictions_chunk = np.vstack(predictions_chunk_list)
        predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
        print "  compute average over transforms"
        predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
        predictions_list.append(predictions_chunk_avg)
        time_since_start = time.time() - start_time
        print "  %s since start" % load_data.hms(time_since_start)
    all_predictions = np.vstack(predictions_list)
    print "Write predictions to %s" % target_path_test
    load_data.save_gz(target_path_test, all_predictions)
print "Done!"
| |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import itertools
import logging
import operator
import os
import six
import sys
import weakref
import ryu.contrib
ryu.contrib.update_module_path()
import ovs.db.data
import ovs.db.types
import ovs.poller
from ovs import (jsonrpc,
ovsuuid,
stream)
from ovs.db import idl
from ryu.lib import hub
from ryu.lib.ovs import vswitch_idl
LOG = logging.getLogger(__name__) # use ovs.vlog?
# for debug
def ovsrec_row_changes_to_string(ovsrec_row):
    """Render a Row's pending ``_changes`` with values as plain strings.

    A falsy ``_changes`` (empty dict or None) is returned unchanged.
    """
    changes = ovsrec_row._changes
    if not changes:
        return changes
    return {column: datum.to_string() for column, datum in changes.items()}
# for debug
def ovsrec_row_to_string(ovsrec_row):
    """Render a Row's uuid, committed data and pending changes for logging."""
    if ovsrec_row._data:
        data_repr = dict((col, datum.to_string())
                         for col, datum in ovsrec_row._data.items())
    else:
        data_repr = ovsrec_row._data
    parts = ['uuid: %s ' % ovsrec_row.uuid,
             '_data: %s ' % data_repr,
             '_changes: %s' % ovsrec_row_changes_to_string(ovsrec_row)]
    return ''.join(parts)
def atom_from_string(base, value_string, symtab=None):
    """Parse *value_string* into a checked ovs.db.data.Atom of *base*'s type.

    :param base: ovs.db.types.BaseType describing the expected atom type.
    :param value_string: textual value to parse.
    :param symtab: symbol table mapping '@name' references to UUIDs;
                   required (and only consulted) for '@'-prefixed UUID atoms.
    :return: the parsed Atom, validated via check_constraints().
    :raises ValueError: when the string cannot be parsed as the given type.
    """
    type_ = base.type
    atom = None
    if type_ == ovs.db.types.IntegerType:
        atom = ovs.db.data.Atom(type_, int(value_string))
    elif type_ == ovs.db.types.RealType:
        # TODO:XXX negation
        atom = ovs.db.data.Atom(
            type_, ovs.db.parser.float_to_int(float(value_string)))
    elif type_ == ovs.db.types.BooleanType:
        if value_string in ("true", "yes", "on", "1"):
            atom = ovs.db.data.Atom(type_, True)
        elif value_string in ("false", "no", "off", "0"):
            # BUG FIX: the original used ``value_string == (...)``, comparing
            # the string against the whole tuple, which is never true -- so
            # every false boolean fell through and raised ValueError below.
            atom = ovs.db.data.Atom(type_, False)
    elif type_ == ovs.db.types.StringType:
        # TODO:XXXX escape: if value_string[0] == '"':
        atom = ovs.db.data.Atom(type_, value_string)
    elif type_ == ovs.db.types.UuidType:
        if value_string[0] == "@":
            assert symtab is not None
            uuid_ = symtab[value_string]
            atom = ovs.db.data.Atom(type_, uuid_)
        else:
            atom = ovs.db.data.Atom(type_,
                                    ovs.ovsuuid.from_string(value_string))
    if atom is None:
        raise ValueError("expected %s" % type_.to_string(), value_string)
    atom.check_constraints(base)
    return atom
def datum_from_string(type_, value_string, symtab=None):
    """Parse *value_string* into JSON for a Datum of column type *type_*.

    Handles scalar, set (``a,b,c``) and map (``k=v,k2=v2``) textual forms;
    the brace/bracket literal syntaxes are not implemented yet.

    :return: the parsed datum serialized via Datum.to_json().
    :raises NotImplementedError: for '{...}' map or '[...]' set literals.
    """
    value_string = value_string.strip()
    if type_.is_map():
        if value_string.startswith('{'):
            # TODO:dict case
            LOG.debug('value_string %s', value_string)
            raise NotImplementedError()
        # comma-separated k=v pairs; each key/value parsed as an atom
        d = dict(v.split('=', 1) for v in value_string.split(','))
        d = dict((atom_from_string(type_.key, key, symtab),
                  atom_from_string(type_.value, value, symtab))
                 for key, value in d.items())
    elif type_.is_set():
        if value_string.startswith('['):
            # TODO:set case
            LOG.debug('value_string %s', value_string)
            raise NotImplementedError()
        values = value_string.split(',')
        # sets are represented as atom -> None mappings
        d = dict((atom_from_string(type_.key, value, symtab), None)
                 for value in values)
    else:
        atom = atom_from_string(type_.key, value_string, symtab)
        d = {atom: None}
    datum = ovs.db.data.Datum(type_, d)
    return datum.to_json()
def ifind(pred, seq):
    """Return the first item of *seq* for which *pred* is true, or None."""
    for item in seq:
        if pred(item):
            return item
    return None
def not_reached():
    # Hard-abort the process; presumably mirrors OVS's NOT_REACHED() C macro.
    os.abort()
def vsctl_fatal(msg):
    """Log *msg* at error level and abort by raising Exception(msg)."""
    LOG.error(msg)
    raise Exception(msg)  # not call ovs.utils.ovs_fatal for reusability
class VSCtlBridge(object):
    """In-memory mirror of an OVSDB Bridge row.

    Fake (VLAN) bridges are modelled as children of a real bridge; a real
    bridge has ``parent`` None and ``vlan`` 0.
    """

    def __init__(self, ovsrec_bridge, name, parent, vlan):
        super(VSCtlBridge, self).__init__()
        self.br_cfg = ovsrec_bridge  # backing Bridge row (None for fake bridges)
        self.name = name
        self.parent = parent
        self.vlan = vlan
        self.ports = set()      # VSCtlPort objects attached to this bridge
        self.children = set()   # fake bridges stacked on this one; WeakSet is needed?

    def find_vlan_bridge(self, vlan):
        """Return the fake child bridge carrying *vlan*, or None."""
        carries_vlan = lambda child: child.vlan == vlan
        return ifind(carries_vlan, self.children)
class VSCtlPort(object):
    """In-memory mirror of an OVSDB Port row."""

    def __init__(self, vsctl_bridge_parent, ovsrec_port):
        super(VSCtlPort, self).__init__()
        # Weak backpointer keeps the bridge<->port graph free of cycles.
        self.bridge = weakref.ref(vsctl_bridge_parent)
        self.port_cfg = ovsrec_port
        self.ifaces = set()  # VSCtlIface members
        self.qos = None      # VSCtlQoS once one is attached
class VSCtlIface(object):
    """In-memory mirror of an OVSDB Interface row."""

    def __init__(self, vsctl_port_parent, ovsrec_iface):
        super(VSCtlIface, self).__init__()
        # Weak backpointer to the owning VSCtlPort.
        self.port = weakref.ref(vsctl_port_parent)
        self.iface_cfg = ovsrec_iface
class VSCtlQoS(object):
    """In-memory mirror of an OVSDB QoS row."""

    def __init__(self, vsctl_port_parent, ovsrec_qos):
        super(VSCtlQoS, self).__init__()
        self.port = weakref.ref(vsctl_port_parent)  # weak backpointer to owner
        self.qos_cfg = ovsrec_qos
        self.queues = set()  # VSCtlQueue members
class VSCtlQueue(object):
    """In-memory mirror of an OVSDB Queue row."""

    def __init__(self, vsctl_qos_parent, ovsrec_queue):
        super(VSCtlQueue, self).__init__()
        self.qos = weakref.ref(vsctl_qos_parent)  # weak backpointer to owner
        self.queue_cfg = ovsrec_queue
class VSCtlContext(object):
    def _invalidate_cache(self):
        # Drop every cached VSCtl* object and mark the cache stale.
        self.cache_valid = False
        self.bridges.clear()
        self.ports.clear()
        self.ifaces.clear()
    def __init__(self, idl_, txn, ovsrec_open_vswitch):
        """Bind an IDL replica, a transaction and the Open_vSwitch root row.

        :param idl_: ovs.db.idl.Idl replica of the database.
        :param txn: transaction that mutating methods write through.
        :param ovsrec_open_vswitch: the (singleton) Open_vSwitch row.
        """
        super(VSCtlContext, self).__init__()
        # Modifiable state
        # self.table = None
        self.idl = idl_
        self.txn = txn
        self.ovs = ovsrec_open_vswitch
        self.symtab = None  # TODO:XXX
        self.verified_ports = False
        # A cache of the contents of the database.
        self.cache_valid = False
        self.bridges = {}  # bridge name -> VSCtlBridge
        self.ports = {}  # port name -> VSCtlPort
        self.ifaces = {}  # iface name -> VSCtlIface
        self.try_again = False  # used by wait-until command
    def done(self):
        # Finish with this context: discard the object cache.
        self._invalidate_cache()
    def verify_bridges(self):
        # Register an IDL verify() on the bridges column so the transaction
        # aborts if another client changed it concurrently.
        self.ovs.verify(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES)
    def verify_ports(self):
        """verify() every bridge->ports and port->interfaces column (once)."""
        if self.verified_ports:
            return
        self.verify_bridges()
        for ovsrec_bridge in self.idl.tables[
                vswitch_idl.OVSREC_TABLE_BRIDGE].rows.values():
            ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
        for ovsrec_port in self.idl.tables[
                vswitch_idl.OVSREC_TABLE_PORT].rows.values():
            ovsrec_port.verify(vswitch_idl.OVSREC_PORT_COL_INTERFACES)
        self.verified_ports = True
    def add_bridge_to_cache(self, ovsrec_bridge, name, parent, vlan):
        """Create a VSCtlBridge, register it in the cache, and return it."""
        vsctl_bridge = VSCtlBridge(ovsrec_bridge, name, parent, vlan)
        if parent:
            # Fake (VLAN) bridge: also link it under its real parent.
            parent.children.add(vsctl_bridge)
        self.bridges[name] = vsctl_bridge
        return vsctl_bridge
    def del_cached_bridge(self, vsctl_bridge):
        """Remove an already-emptied bridge from the cache and the database."""
        # Callers must delete ports and child bridges first.
        assert not vsctl_bridge.ports
        assert not vsctl_bridge.children
        parent = vsctl_bridge.parent
        if parent:
            parent.children.remove(vsctl_bridge)
            vsctl_bridge.parent = None  # break circular reference
        ovsrec_bridge = vsctl_bridge.br_cfg
        if ovsrec_bridge:
            # Real bridge: delete its row and unlink it from Open_vSwitch.
            ovsrec_bridge.delete()
            self.ovs_delete_bridge(ovsrec_bridge)
        del self.bridges[vsctl_bridge.name]
    def del_cached_qos(self, vsctl_qos):
        # Detach the QoS record from its port and drop its backpointers.
        vsctl_qos.port().qos = None
        vsctl_qos.port = None
        vsctl_qos.queues = None
def add_port_to_cache(self, vsctl_bridge_parent, ovsrec_port):
tag = getattr(ovsrec_port, vswitch_idl.OVSREC_PORT_COL_TAG, None)
if (tag is not None and tag >= 0 and tag < 4096):
vlan_bridge = vsctl_bridge_parent.find_vlan_bridge()
if vlan_bridge:
vsctl_bridge_parent = vlan_bridge
vsctl_port = VSCtlPort(vsctl_bridge_parent, ovsrec_port)
vsctl_bridge_parent.ports.add(vsctl_port)
self.ports[ovsrec_port.name] = vsctl_port
return vsctl_port
    def del_cached_port(self, vsctl_port):
        """Drop a port (whose ifaces were already removed) from cache and DB."""
        assert not vsctl_port.ifaces
        vsctl_port.bridge().ports.remove(vsctl_port)
        vsctl_port.bridge = None
        # Sanity check: the cache entry must be the object being removed.
        port = self.ports.pop(vsctl_port.port_cfg.name)
        assert port == vsctl_port
        vsctl_port.port_cfg.delete()
    def add_iface_to_cache(self, vsctl_port_parent, ovsrec_iface):
        # Wrap the Interface row, link it under its port, and index by name.
        vsctl_iface = VSCtlIface(vsctl_port_parent, ovsrec_iface)
        vsctl_port_parent.ifaces.add(vsctl_iface)
        self.ifaces[ovsrec_iface.name] = vsctl_iface
    def add_qos_to_cache(self, vsctl_port_parent, ovsrec_qos):
        # Wrap the (list-valued) qos column and attach it to its port.
        vsctl_qos = VSCtlQoS(vsctl_port_parent, ovsrec_qos)
        vsctl_port_parent.qos = vsctl_qos
        return vsctl_qos
    def add_queue_to_cache(self, vsctl_qos_parent, ovsrec_queue):
        # Wrap the Queue row and link it under its QoS record.
        vsctl_queue = VSCtlQueue(vsctl_qos_parent, ovsrec_queue)
        vsctl_qos_parent.queues.add(vsctl_queue)
    def del_cached_iface(self, vsctl_iface):
        # Unlink the interface from its port, drop it from the cache, and
        # delete its database row.
        vsctl_iface.port().ifaces.remove(vsctl_iface)
        vsctl_iface.port = None
        del self.ifaces[vsctl_iface.iface_cfg.name]
        vsctl_iface.iface_cfg.delete()
    def invalidate_cache(self):
        # No-op when nothing has been cached yet.
        if not self.cache_valid:
            return
        self._invalidate_cache()
    def populate_cache(self):
        # Build the VSCtl* object cache from the Bridge table.
        self._populate_cache(self.idl.tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
@staticmethod
def port_is_fake_bridge(ovsrec_port):
return (ovsrec_port.fake_bridge and
ovsrec_port.tag >= 0 and ovsrec_port.tag <= 4095)
def _populate_cache(self, ovsrec_bridges):
if self.cache_valid:
return
self.cache_valid = True
bridges = set()
ports = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
LOG.warn('%s: database contains duplicate bridge name', name)
bridges.add(name)
vsctl_bridge = self.add_bridge_to_cache(ovsrec_bridge, name,
None, 0)
if not vsctl_bridge:
continue
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
if port_name in ports:
# Duplicate ovsrec_port name.
# (We will warn about that later.)
continue
ports.add(port_name)
if (self.port_is_fake_bridge(ovsrec_port) and
port_name not in bridges):
bridges.add(port_name)
self.add_bridge_to_cache(None, port_name, vsctl_bridge,
ovsrec_port.tag)
bridges = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
continue
bridges.add(name)
vsctl_bridge = self.bridges[name]
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
vsctl_port = self.ports.get(port_name)
if vsctl_port:
if ovsrec_port == vsctl_port.port_cfg:
LOG.warn('%s: vsctl_port is in multiple bridges '
'(%s and %s)',
port_name, vsctl_bridge.name,
vsctl_port.br.name)
else:
LOG.error('%s: database contains duplicate '
'vsctl_port name',
ovsrec_port.name)
continue
if (self.port_is_fake_bridge(ovsrec_port) and
port_name in bridges):
continue
# LOG.debug('ovsrec_port %s %s %s',
# ovsrec_port, ovsrec_port._data, ovsrec_port.tag)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
# LOG.debug('vsctl_port %s', vsctl_port)
for ovsrec_iface in ovsrec_port.interfaces:
iface = self.ifaces.get(ovsrec_iface.name)
if iface:
if ovsrec_iface == iface.iface_cfg:
LOG.warn(
'%s: interface is in multiple ports '
'(%s and %s)',
ovsrec_iface.name,
iface.port().port_cfg.name,
vsctl_port.port_cfg.name)
else:
LOG.error(
'%s: database contains duplicate interface '
'name',
ovsrec_iface.name)
continue
self.add_iface_to_cache(vsctl_port, ovsrec_iface)
ovsrec_qos = ovsrec_port.qos
vsctl_qos = self.add_qos_to_cache(vsctl_port, ovsrec_qos)
if len(ovsrec_qos):
for ovsrec_queue in ovsrec_qos[0].queues:
self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
    def check_conflicts(self, name, msg):
        """Abort via vsctl_fatal() if *name* collides with any cached
        bridge, port or interface; *msg* prefixes the fatal message."""
        self.verify_ports()
        if name in self.bridges:
            vsctl_fatal('%s because a bridge named %s already exists' %
                        (msg, name))
        if name in self.ports:
            vsctl_fatal('%s because a port named %s already exists on '
                        'bridge %s' %
                        (msg, name, self.ports[name].bridge().name))
        if name in self.ifaces:
            vsctl_fatal('%s because an interface named %s already '
                        'exists on bridge %s' %
                        (msg, name, self.ifaces[name].port().bridge().name))
def find_bridge(self, name, must_exist):
assert self.cache_valid
vsctl_bridge = self.bridges.get(name)
if must_exist and not vsctl_bridge:
vsctl_fatal('no bridge named %s' % name)
self.verify_bridges()
return vsctl_bridge
def find_real_bridge(self, name, must_exist):
vsctl_bridge = self.find_bridge(name, must_exist)
if vsctl_bridge and vsctl_bridge.parent:
vsctl_fatal('%s is a fake bridge' % name)
return vsctl_bridge
    def find_bridge_by_id(self, datapath_id, must_exist):
        """Return the cached bridge whose datapath-id matches, or None."""
        assert self.cache_valid
        for vsctl_bridge in self.bridges.values():
            # The datapath_id column is indexed and quote-stripped here, so
            # it is presumably a list of quoted strings -- TODO confirm
            # against the OVSDB schema.
            if vsctl_bridge.br_cfg.datapath_id[0].strip('"') == datapath_id:
                self.verify_bridges()
                return vsctl_bridge
        if must_exist:
            vsctl_fatal('no bridge id %s' % datapath_id)
        return None
def find_port(self, name, must_exist):
assert self.cache_valid
vsctl_port = self.ports.get(name)
if vsctl_port and name == vsctl_port.bridge().name:
vsctl_port = None
if must_exist and not vsctl_port:
vsctl_fatal('no vsctl_port named %s' % name)
return vsctl_port
def find_iface(self, name, must_exist):
assert self.cache_valid
vsctl_iface = self.ifaces.get(name)
if vsctl_iface and name == vsctl_iface.port().bridge().name:
vsctl_iface = None
if must_exist and not vsctl_iface:
vsctl_fatal('no interface named %s' % name)
self.verify_ports()
return vsctl_iface
    def set_qos(self, vsctl_port, type, max_rate):
        """Create or update the QoS row on *vsctl_port*; return that row.

        ``type`` is the QoS discipline name; ``max_rate`` (if not None) is
        written to other_config:max-rate.
        """
        # qos_cfg is the list-valued qos column: empty or one QoS row.
        qos = vsctl_port.qos.qos_cfg
        if not len(qos):
            ovsrec_qos = self.txn.insert(
                self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
            vsctl_port.port_cfg.qos = [ovsrec_qos]
        else:
            ovsrec_qos = qos[0]
        ovsrec_qos.type = type
        if max_rate is not None:
            self.set_column(ovsrec_qos, 'other_config', 'max-rate', max_rate)
        self.add_qos_to_cache(vsctl_port, [ovsrec_qos])
        return ovsrec_qos
    def set_queue(self, vsctl_qos, max_rate, min_rate,
                  queue_id):
        """Create or update Queue *queue_id* under *vsctl_qos*; return it.

        ``max_rate``/``min_rate`` (when not None) are written to the
        queue's other_config column.
        """
        ovsrec_qos = vsctl_qos.qos_cfg[0]
        try:
            ovsrec_queue = ovsrec_qos.queues[queue_id]
        except (AttributeError, KeyError):
            # No queue with that id yet; insert a fresh Queue row.
            ovsrec_queue = self.txn.insert(
                self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QUEUE])
        if max_rate is not None:
            self.set_column(ovsrec_queue, 'other_config',
                            'max-rate', max_rate)
        if min_rate is not None:
            self.set_column(ovsrec_queue, 'other_config',
                            'min-rate', min_rate)
        # Link the queue into the QoS row's queues map by uuid reference.
        self.set_column(ovsrec_qos, 'queues', queue_id,
                        ['uuid', str(ovsrec_queue.uuid)])
        self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
        return ovsrec_queue
    @staticmethod
    def _column_set(ovsrec_row, column, ovsrec_value):
        """Write *ovsrec_value* into *column* of the row."""
        # need to trigger Row.__setattr__()
        setattr(ovsrec_row, column, ovsrec_value)
    @staticmethod
    def _column_insert(ovsrec_row, column, ovsrec_add):
        """Append *ovsrec_add* to a list-valued column and write it back."""
        value = getattr(ovsrec_row, column)
        value.append(ovsrec_add)
        VSCtlContext._column_set(ovsrec_row, column, value)
    @staticmethod
    def _column_delete(ovsrec_row, column, ovsrec_del):
        """Remove *ovsrec_del* from a list-valued column and write it back.

        Tolerates the value being absent (see comment below).
        """
        value = getattr(ovsrec_row, column)
        try:
            value.remove(ovsrec_del)
        except ValueError:
            # Datum.to_python() with _uuid_to_row trims down deleted
            # references. If ovsrec_del.delete() is called before
            # _column_delete(), value doesn't include ovsrec_del.
            pass
        VSCtlContext._column_set(ovsrec_row, column, value)
    @staticmethod
    def bridge_insert_port(ovsrec_bridge, ovsrec_port):
        """Add *ovsrec_port* to the bridge's ports column."""
        VSCtlContext._column_insert(ovsrec_bridge,
                                    vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
                                    ovsrec_port)
    @staticmethod
    def bridge_delete_port(ovsrec_bridge, ovsrec_port):
        """Remove *ovsrec_port* from the bridge's ports column."""
        VSCtlContext._column_delete(ovsrec_bridge,
                                    vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
                                    ovsrec_port)
    @staticmethod
    def port_delete_qos(ovsrec_port, ovsrec_qos):
        """Remove *ovsrec_qos* from the port's qos column."""
        VSCtlContext._column_delete(ovsrec_port,
                                    vswitch_idl.OVSREC_PORT_COL_QOS,
                                    ovsrec_qos)
    def ovs_insert_bridge(self, ovsrec_bridge):
        """Add *ovsrec_bridge* to the Open_vSwitch row's bridges column."""
        self._column_insert(self.ovs,
                            vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
                            ovsrec_bridge)
def ovs_delete_bridge(self, ovsrec_bridge):
    """Remove *ovsrec_bridge* from the root Open_vSwitch 'bridges' column."""
    bridges_column = vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES
    self._column_delete(self.ovs, bridges_column, ovsrec_bridge)
def del_port(self, vsctl_port):
    """Detach *vsctl_port* from its bridge and purge it from the cache."""
    bridge = vsctl_port.bridge()
    # A fake (VLAN) bridge stores its ports on the real parent bridge row.
    ovsrec_bridge = bridge.parent.br_cfg if bridge.parent else bridge.br_cfg
    self.bridge_delete_port(ovsrec_bridge, vsctl_port.port_cfg)
    # Iterate over a copy: del_cached_iface() mutates vsctl_port.ifaces.
    for vsctl_iface in vsctl_port.ifaces.copy():
        self.del_cached_iface(vsctl_iface)
    self.del_cached_port(vsctl_port)
def del_bridge(self, vsctl_bridge):
    """Delete *vsctl_bridge*: fake children first, then ports, then itself."""
    # Copies are required because the del_* calls mutate the containers.
    for fake_child in vsctl_bridge.children.copy():
        self.del_bridge(fake_child)
    for port in vsctl_bridge.ports.copy():
        self.del_port(port)
    self.del_cached_bridge(vsctl_bridge)
def del_qos(self, vsctl_qos):
    """Unlink the QoS row from its port and drop it from the cache."""
    port_row = vsctl_qos.port().port_cfg
    qos_rows = vsctl_qos.qos_cfg
    if len(qos_rows):
        # qos_cfg is an optional column: a 0/1-element list.
        self.port_delete_qos(port_row, qos_rows[0])
    self.del_cached_qos(vsctl_qos)
def add_port(self, br_name, port_name, may_exist, fake_iface,
             iface_names, settings=None):
    """Create port *port_name* with interfaces *iface_names* on *br_name*.

    With may_exist, an identical pre-existing port is accepted silently;
    a port attached to another bridge or with different interfaces is
    fatal.

    :type settings: list of (column, key, value_json)
                    where column and key are str,
                    value_json is json that is represented
                    by Datum.to_json()
    """
    settings = settings or []
    self.populate_cache()
    if may_exist:
        vsctl_port = self.find_port(port_name, False)
        if vsctl_port:
            want_names = set(iface_names)
            have_names = set(ovsrec_iface.name for ovsrec_iface in
                             vsctl_port.port_cfg.interfaces)
            if vsctl_port.bridge().name != br_name:
                # BUG FIX: the arguments were passed as extra positional
                # arguments to vsctl_fatal() instead of being %-formatted
                # into the message, which raised TypeError at runtime.
                vsctl_fatal('"%s" but %s is actually attached to '
                            'vsctl_bridge %s' %
                            (br_name, port_name, vsctl_port.bridge().name))
            if want_names != have_names:
                want_names_string = ','.join(want_names)
                have_names_string = ','.join(have_names)
                vsctl_fatal('"%s" but %s actually has interface(s) %s' %
                            (want_names_string,
                             port_name, have_names_string))
            return
    self.check_conflicts(port_name,
                         'cannot create a port named %s' % port_name)
    for iface_name in iface_names:
        self.check_conflicts(
            iface_name, 'cannot create an interface named %s' % iface_name)
    vsctl_bridge = self.find_bridge(br_name, True)
    # Insert one Interface row per requested interface name.
    ifaces = []
    for iface_name in iface_names:
        ovsrec_iface = self.txn.insert(
            self.idl.tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
        ovsrec_iface.name = iface_name
        ifaces.append(ovsrec_iface)
    ovsrec_port = self.txn.insert(
        self.idl.tables[vswitch_idl.OVSREC_TABLE_PORT])
    ovsrec_port.name = port_name
    ovsrec_port.interfaces = ifaces
    ovsrec_port.bond_fake_iface = fake_iface
    if vsctl_bridge.parent:
        # A port on a fake bridge carries the fake bridge's VLAN tag.
        tag = vsctl_bridge.vlan
        ovsrec_port.tag = tag
    for setting in settings:
        # TODO:XXX self.symtab:
        column, key, value = setting
        self.set_column(ovsrec_port, column, key, value)
    # Fake bridges keep their ports on the real parent bridge row.
    if vsctl_bridge.parent:
        ovsrec_bridge = vsctl_bridge.parent.br_cfg
    else:
        ovsrec_bridge = vsctl_bridge.br_cfg
    self.bridge_insert_port(ovsrec_bridge, ovsrec_port)
    vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
    for ovsrec_iface in ifaces:
        self.add_iface_to_cache(vsctl_port, ovsrec_iface)
def add_bridge(self, br_name, parent_name=None, vlan=0, may_exist=False):
    """Create bridge *br_name*, or a fake VLAN bridge under *parent_name*.

    With may_exist, an existing bridge with matching parent/VLAN is
    accepted silently; any mismatch is fatal.
    """
    self.populate_cache()
    if may_exist:
        vsctl_bridge = self.find_bridge(br_name, False)
        if vsctl_bridge:
            if not parent_name:
                # Asked for a real bridge; an existing fake one clashes.
                if vsctl_bridge.parent:
                    vsctl_fatal('"--may-exist add-vsctl_bridge %s" '
                                'but %s is a VLAN bridge for VLAN %d' %
                                (br_name, br_name, vsctl_bridge.vlan))
            else:
                # Asked for a fake bridge; parent and VLAN must match.
                if not vsctl_bridge.parent:
                    vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
                                'but %s is not a VLAN bridge' %
                                (br_name, parent_name, vlan, br_name))
                elif vsctl_bridge.parent.name != parent_name:
                    vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
                                'but %s has the wrong parent %s' %
                                (br_name, parent_name, vlan,
                                 br_name, vsctl_bridge.parent.name))
                elif vsctl_bridge.vlan != vlan:
                    vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
                                'but %s is a VLAN bridge for the wrong '
                                'VLAN %d' %
                                (br_name, parent_name, vlan, br_name,
                                 vsctl_bridge.vlan))
            return
    self.check_conflicts(br_name,
                         'cannot create a bridge named %s' % br_name)
    txn = self.txn
    tables = self.idl.tables
    if not parent_name:
        # Real bridge: internal interface + port + Bridge row, linked
        # into the root Open_vSwitch row.
        ovsrec_iface = txn.insert(
            tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
        ovsrec_iface.name = br_name
        ovsrec_iface.type = 'internal'
        ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
        ovsrec_port.name = br_name
        ovsrec_port.interfaces = [ovsrec_iface]
        ovsrec_port.fake_bridge = False
        ovsrec_bridge = txn.insert(tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
        ovsrec_bridge.name = br_name
        ovsrec_bridge.ports = [ovsrec_port]
        self.ovs_insert_bridge(ovsrec_bridge)
    else:
        parent = self.find_bridge(parent_name, False)
        if parent and parent.parent:
            vsctl_fatal('cannot create bridge with fake bridge as parent')
        if not parent:
            vsctl_fatal('parent bridge %s does not exist' % parent_name)
        # Fake bridge: a tagged fake-bridge port on the real parent.
        ovsrec_iface = txn.insert(
            tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
        ovsrec_iface.name = br_name
        ovsrec_iface.type = 'internal'
        ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
        ovsrec_port.name = br_name
        ovsrec_port.interfaces = [ovsrec_iface]
        ovsrec_port.fake_bridge = True
        ovsrec_port.tag = vlan
        self.bridge_insert_port(parent.br_cfg, ovsrec_port)
    # The cached topology no longer matches the pending inserts.
    self.invalidate_cache()
@staticmethod
def parse_column_key_value(table_schema, setting_string):
    """Parse '<column>[:<key>][=<value>]' into (column, key, value).

    key and value are None when absent; a present value is converted
    to a Datum according to the column's schema type.
    """
    if '=' in setting_string:
        column, value = setting_string.split('=', 1)
    else:
        column, value = setting_string, None
    key = None
    if ':' in column:
        column, key = column.split(':', 1)
    if value is not None:
        LOG.debug("columns %s", list(table_schema.columns.keys()))
        type_ = table_schema.columns[column].type
        value = datum_from_string(type_, value)
        LOG.debug("column %s value %s", column, value)
    return (column, key, value)
def set_column(self, ovsrec_row, column, key, value_json):
    """Set *column* of *ovsrec_row* (or one map entry when *key* is given).

    value_json is JSON in the shape produced by Datum.to_json().
    """
    if column not in ovsrec_row._table.columns:
        vsctl_fatal('%s does not contain a column whose name matches "%s"'
                    % (ovsrec_row._table.name, column))
    column_schema = ovsrec_row._table.columns[column]
    if key is not None:
        # Wrap the single entry as a one-element OVSDB map.
        value_json = ['map', [[key, value_json]]]
        if column_schema.type.value.type == ovs.db.types.VoidType:
            vsctl_fatal('cannot specify key to set for non-map column %s' %
                        column)
        datum = ovs.db.data.Datum.from_json(column_schema.type, value_json,
                                            self.symtab)
        # Merge into the existing map instead of replacing it wholesale.
        values = getattr(ovsrec_row, column, {})
        values.update(datum.to_python(ovs.db.idl._uuid_to_row))
        setattr(ovsrec_row, column, values)
    else:
        # Whole-column assignment.
        datum = ovs.db.data.Datum.from_json(column_schema.type, value_json,
                                            self.symtab)
        setattr(ovsrec_row, column,
                datum.to_python(ovs.db.idl._uuid_to_row))
def _get_row_by_id(self, table_name, vsctl_row_id, record_id):
    """Resolve *record_id* to an OVSDB row via the *vsctl_row_id* rule.

    Returns the row referenced through vsctl_row_id.uuid_column when
    one is configured, the referring row itself otherwise, or None
    when the id cannot be resolved.  Aborts when the name is ambiguous.
    """
    if not vsctl_row_id.table:
        return None
    if not vsctl_row_id.name_column:
        # Nameless lookup: '.' refers to the table's one-and-only row.
        if record_id != '.':
            return None
        values = list(self.idl.tables[vsctl_row_id.table].rows.values())
        # BUG FIX: was "if not values or len(values) > 2", which accepted
        # a two-row table and silently picked an arbitrary row.  '.' is
        # only meaningful when the table holds exactly one row (this
        # matches ovs-vsctl's behavior).
        if len(values) != 1:
            return None
        referrer = values[0]
    else:
        # Named lookup: scan for a row whose name_column equals record_id.
        referrer = None
        for ovsrec_row in self.idl.tables[
                vsctl_row_id.table].rows.values():
            name = getattr(ovsrec_row, vsctl_row_id.name_column)
            assert type(name) in (list, str, six.text_type)
            if type(name) != list and name == record_id:
                if referrer:
                    vsctl_fatal('multiple rows in %s match "%s"' %
                                (table_name, record_id))
                referrer = ovsrec_row
    if not referrer:
        return None
    final = None
    if vsctl_row_id.uuid_column:
        referrer.verify(vsctl_row_id.uuid_column)
        uuid = getattr(referrer, vsctl_row_id.uuid_column)
        uuid_ = referrer._data[vsctl_row_id.uuid_column]
        # uuid_column must be an optional (0/1-element) pure-uuid column.
        assert uuid_.type.key.type == ovs.db.types.UuidType
        assert uuid_.type.value is None
        assert type(uuid) == list
        if len(uuid) == 1:
            final = uuid[0]
    else:
        final = referrer
    return final
def get_row(self, vsctl_table, record_id):
    """Look up a row by uuid string or by one of the table's row-id rules.

    Returns None when nothing matches.
    """
    table_name = vsctl_table.table_name
    if ovsuuid.is_valid_string(record_id):
        # Direct uuid reference.
        return self.idl.tables[table_name].rows.get(
            ovsuuid.from_string(record_id))
    # Otherwise try each registered row-id rule in order.
    for vsctl_row_id in vsctl_table.row_ids:
        ovsrec_row = self._get_row_by_id(table_name, vsctl_row_id,
                                         record_id)
        if ovsrec_row:
            return ovsrec_row
    return None
def must_get_row(self, vsctl_table, record_id):
    """Like get_row(), but abort via vsctl_fatal() when nothing matches."""
    ovsrec_row = self.get_row(vsctl_table, record_id)
    if ovsrec_row is None:
        vsctl_fatal('no row "%s" in table %s' % (record_id,
                                                 vsctl_table.table_name))
    return ovsrec_row
class _CmdShowTable(object):
    """Static description of one table rendered by the 'show' command."""

    def __init__(self, table, name_column, columns, recurse):
        super(_CmdShowTable, self).__init__()
        self.table = table              # OVSDB table name
        self.name_column = name_column  # label column; None -> row uuid
        self.columns = columns          # extra columns to display
        self.recurse = recurse          # doubles as a cycle guard flag
class _VSCtlRowID(object):
    """One rule for resolving a user-supplied record id to a row."""

    def __init__(self, table, name_column, uuid_column):
        super(_VSCtlRowID, self).__init__()
        self.table = table              # table to search in
        self.name_column = name_column  # column matched against the id
        self.uuid_column = uuid_column  # optional ref column to follow
class _VSCtlTable(object):
    """A table name paired with its list of _VSCtlRowID lookup rules."""

    def __init__(self, table_name, vsctl_row_id_list):
        super(_VSCtlTable, self).__init__()
        self.table_name = table_name
        self.row_ids = vsctl_row_id_list
class VSCtlCommand(object):
    """One parsed vsctl command: its name, arguments and options."""

    def __init__(self, command, args=None, options=None):
        super(VSCtlCommand, self).__init__()
        self.command = command
        self.args = args or []
        self.options = options or []
        # Populated by command execution with the command's output.
        self.result = None
        # Internal hooks wired up by VSCtl._run_command().
        self._prerequisite = None
        self._run = None

    def has_option(self, option):
        """Return True when *option* was supplied for this command."""
        return option in self.options
class VSCtl(object):
def _reset(self):
    """Clear per-run state before executing a new batch of commands."""
    self.schema_helper = self.ovs = self.txn = None
    self.wait_for_reload = True
    self.dry_run = False
def __init__(self, remote):
    """:param remote: OVSDB connection target (e.g. 'unix:/...db.sock')."""
    super(VSCtl, self).__init__()
    self.remote = remote
    # Schema is fetched lazily and cached across runs.
    self.schema_json = None
    self.schema = None
    # Per-run state; same fields that _reset() re-initializes.
    self._reset()
def _rpc_get_schema_json(self, database):
    """Fetch *database*'s schema over a one-shot JSON-RPC connection.

    Aborts via vsctl_fatal() on connection or RPC errors; returns the
    raw schema JSON otherwise.
    """
    LOG.debug('remote %s', self.remote)
    error, stream_ = stream.Stream.open_block(
        stream.Stream.open(self.remote))
    if error:
        vsctl_fatal('error %s' % os.strerror(error))
    rpc = jsonrpc.Connection(stream_)
    request = jsonrpc.Message.create_request('get_schema', [database])
    error, reply = rpc.transact_block(request)
    # The connection is only needed for this single request.
    rpc.close()
    if error:
        vsctl_fatal(os.strerror(error))
    elif reply.error:
        vsctl_fatal('error %s' % reply.error)
    return reply.result
def _init_schema_helper(self):
    """Fetch (once) and cache the DB schema; build a fresh SchemaHelper.

    A throwaway SchemaHelper with register_all() is used only to parse
    the complete idl schema into self.schema; the helper stored in
    self.schema_helper starts empty so that each run registers just the
    tables/columns its commands need.
    """
    if self.schema_json is None:
        self.schema_json = self._rpc_get_schema_json(
            vswitch_idl.OVSREC_DB_NAME)
        schema_helper = idl.SchemaHelper(None, self.schema_json)
        schema_helper.register_all()
        self.schema = schema_helper.get_idl_schema()
    # LOG.debug('schema_json %s', schema_json)
    self.schema_helper = idl.SchemaHelper(None, self.schema_json)
@staticmethod
def _idl_block(idl_):
    """Block the current (green)thread until *idl_* has work to process."""
    poller = ovs.poller.Poller()
    idl_.wait(poller)
    poller.block()
@staticmethod
def _idl_wait(idl_, seqno):
    """Process IDL updates until change_seqno moves past *seqno*.

    The operand order in the condition matters: idl_.run() has side
    effects and is only invoked while the sequence number is unchanged.
    """
    while idl_.change_seqno == seqno and not idl_.run():
        VSCtl._idl_block(idl_)
def _run_prerequisites(self, commands):
    """Let every command register the tables/columns it will need."""
    schema_helper = self.schema_helper
    schema_helper.register_table(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH)
    if self.wait_for_reload:
        # cur_cfg is polled after commit to detect that ovs-vswitchd
        # has picked up the new configuration.
        schema_helper.register_columns(
            vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
            [vswitch_idl.OVSREC_OPEN_VSWITCH_COL_CUR_CFG])
    for command in commands:
        prerequisite = command._prerequisite
        if not prerequisite:
            continue
        ctx = VSCtlContext(None, None, None)
        prerequisite(ctx, command)
        ctx.done()
def _do_vsctl(self, idl_, commands):
    """Run *commands* inside a single OVSDB transaction.

    Returns True on success (or no-op) and False when the whole batch
    must be retried (TRY_AGAIN, or a command set ctx.try_again).
    """
    txn = idl.Transaction(idl_)
    self.txn = txn
    if self.dry_run:
        txn.dry_run = True
    txn.add_comment('ovs-vsctl')  # TODO:XXX add operation name. args
    ovs_rows = idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH].rows
    if ovs_rows:
        # Normally there is exactly one root Open_vSwitch row.
        ovs_ = list(ovs_rows.values())[0]
    else:
        # XXX add verification that table is empty
        ovs_ = txn.insert(
            idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH])
    if self.wait_for_reload:
        # Bump next_cfg now so we can later wait for cur_cfg to catch
        # up, i.e. for ovs-vswitchd to apply the change.
        ovs_.increment(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_NEXT_CFG)
    # TODO:XXX
    # symtab = ovsdb_symbol_table_create()
    ctx = VSCtlContext(idl_, txn, ovs_)
    for command in commands:
        if not command._run:
            continue
        command._run(ctx, command)
        if ctx.try_again:
            return False
    LOG.debug('result_backup:\n%s', [command.result for command in commands])
    ctx.done()
    # TODO:XXX check if created symbols are really created, referenced.
    status = txn.commit_block()
    next_cfg = 0
    if self.wait_for_reload and status == idl.Transaction.SUCCESS:
        next_cfg = txn.get_increment_new_value()
    # TODO:XXX
    # if status in (idl.Transaction.UNCHANGED, idl.Transaction.SUCCESS):
    #     for command in commands:
    #         if not command.post_func:
    #             continue
    #         ctx = VSCtlContext(idl_, txn, self.ovs)
    #         command.post_func(ctx)
    #         ctx.done()
    # Keep a reference for error reporting before dropping self.txn.
    txn_ = self.txn
    self.txn = None
    txn = None
    if status in (idl.Transaction.UNCOMMITTED, idl.Transaction.INCOMPLETE):
        # commit_block() never returns these in-progress states.
        not_reached()
    elif status == idl.Transaction.ABORTED:
        vsctl_fatal('transaction aborted')
    elif status == idl.Transaction.UNCHANGED:
        LOG.info('unchanged')
    elif status == idl.Transaction.SUCCESS:
        LOG.info('success')
    elif status == idl.Transaction.TRY_AGAIN:
        return False
    elif status == idl.Transaction.ERROR:
        vsctl_fatal('transaction error: %s' % txn_.get_error())
    elif status == idl.Transaction.NOT_LOCKED:
        vsctl_fatal('database not locked')
    else:
        not_reached()
    if self.wait_for_reload and status != idl.Transaction.UNCHANGED:
        # Wait until ovs-vswitchd acknowledges the new configuration.
        while True:
            idl_.run()
            if (ovs_.cur_cfg >= next_cfg):
                break
            self._idl_block(idl_)
    return True
def _do_main(self, commands):
    """
    :type commands: list of VSCtlCommand

    Retry loop: _do_vsctl() returns False on TRY_AGAIN, in which case
    we wait for the IDL contents to change and rerun the whole batch.
    """
    self._reset()
    self._init_schema_helper()
    self._run_prerequisites(commands)
    idl_ = idl.Idl(self.remote, self.schema_helper)
    seqno = idl_.change_seqno
    while True:
        self._idl_wait(idl_, seqno)
        seqno = idl_.change_seqno
        if self._do_vsctl(idl_, commands):
            break
        if self.txn:
            # A failed attempt leaves its transaction behind; abort it.
            self.txn.abort()
            self.txn = None
        # TODO:XXX
        # ovsdb_symbol_table_destroy(symtab)
    idl_.close()
def _run_command(self, commands):
    """
    :type commands: list of VSCtlCommand

    Wires each command's (_prerequisite, _run) hooks from the dispatch
    table below, then executes the whole batch via _do_main().
    NOTE: an unsupported command name raises KeyError here.
    """
    all_commands = {
        # Open vSwitch commands.
        'init': (None, self._cmd_init),
        'show': (self._pre_cmd_show, self._cmd_show),
        # Bridge commands.
        'add-br': (self._pre_add_br, self._cmd_add_br),
        'del-br': (self._pre_get_info, self._cmd_del_br),
        'list-br': (self._pre_get_info, self._cmd_list_br),
        # Port. commands
        'list-ports': (self._pre_get_info, self._cmd_list_ports),
        'add-port': (self._pre_cmd_add_port, self._cmd_add_port),
        'del-port': (self._pre_get_info, self._cmd_del_port),
        # 'add-bond':
        # 'port-to-br':
        # Interface commands.
        'list-ifaces': (self._pre_get_info, self._cmd_list_ifaces),
        # 'iface-to-br':
        # Controller commands.
        'get-controller': (self._pre_controller, self._cmd_get_controller),
        'del-controller': (self._pre_controller, self._cmd_del_controller),
        'set-controller': (self._pre_controller, self._cmd_set_controller),
        # 'get-fail-mode':
        # 'del-fail-mode':
        # 'set-fail-mode':
        # Manager commands.
        # 'get-manager':
        # 'del-manager':
        # 'set-manager':
        # Switch commands.
        # 'emer-reset':
        # Database commands.
        # 'comment':
        'get': (self._pre_cmd_get, self._cmd_get),
        # 'list':
        'find': (self._pre_cmd_find, self._cmd_find),
        'set': (self._pre_cmd_set, self._cmd_set),
        # 'add':
        'clear': (self._pre_cmd_clear, self._cmd_clear),
        # 'create':
        # 'destroy':
        # 'wait-until':
        'set-qos': (self._pre_cmd_set_qos, self._cmd_set_qos),
        'set-queue': (self._pre_cmd_set_queue, self._cmd_set_queue),
        'del-qos': (self._pre_get_info, self._cmd_del_qos),
        # for quantum_adapter
        'list-ifaces-verbose': (self._pre_cmd_list_ifaces_verbose,
                                self._cmd_list_ifaces_verbose),
    }
    for command in commands:
        funcs = all_commands[command.command]
        command._prerequisite, command._run = funcs
    self._do_main(commands)
def run_command(self, commands, timeout_sec=None, exception=None):
    """Execute *commands*, optionally bounded by a hub.Timeout.

    :param timeout_sec: seconds before *exception* is raised; None
                        means wait indefinitely.
    """
    if timeout_sec is None:
        self._run_command(commands)
        return
    with hub.Timeout(timeout_sec, exception):
        self._run_command(commands)
# commands
def _cmd_init(self, _ctx, _command):
# nothing. Just check connection to ovsdb
pass
# Tables rendered by the 'show' command, in display order.  Each entry
# names the label column (None -> print the row uuid) plus the extra
# columns to print; the 'recurse' flag doubles as a runtime cycle guard.
_CMD_SHOW_TABLES = [
    _CmdShowTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH, None,
                  [vswitch_idl.OVSREC_OPEN_VSWITCH_COL_MANAGER_OPTIONS,
                   vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
                   vswitch_idl.OVSREC_OPEN_VSWITCH_COL_OVS_VERSION],
                  False),
    _CmdShowTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
                  vswitch_idl.OVSREC_BRIDGE_COL_NAME,
                  [vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
                   vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
                   vswitch_idl.OVSREC_BRIDGE_COL_PORTS],
                  False),
    _CmdShowTable(vswitch_idl.OVSREC_TABLE_PORT,
                  vswitch_idl.OVSREC_PORT_COL_NAME,
                  [vswitch_idl.OVSREC_PORT_COL_TAG,
                   vswitch_idl.OVSREC_PORT_COL_TRUNKS,
                   vswitch_idl.OVSREC_PORT_COL_INTERFACES],
                  False),
    _CmdShowTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
                  vswitch_idl.OVSREC_INTERFACE_COL_NAME,
                  [vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
                   vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS],
                  False),
    _CmdShowTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
                  vswitch_idl.OVSREC_CONTROLLER_COL_TARGET,
                  [vswitch_idl.OVSREC_CONTROLLER_COL_IS_CONNECTED],
                  False),
    _CmdShowTable(vswitch_idl.OVSREC_TABLE_MANAGER,
                  vswitch_idl.OVSREC_MANAGER_COL_TARGET,
                  [vswitch_idl.OVSREC_MANAGER_COL_IS_CONNECTED],
                  False),
]
def _pre_cmd_show(self, _ctx, _command):
    """Register every table and column that 'show' may need to read."""
    schema_helper = self.schema_helper
    for show_table in self._CMD_SHOW_TABLES:
        schema_helper.register_table(show_table.table)
        if show_table.name_column:
            schema_helper.register_columns(show_table.table,
                                           [show_table.name_column])
        schema_helper.register_columns(show_table.table, show_table.columns)
@staticmethod
def _cmd_show_find_table_by_row(row):
    """Return the _CmdShowTable describing *row*'s table, or None."""
    return next((show for show in VSCtl._CMD_SHOW_TABLES
                 if show.table == row._table.name), None)
@staticmethod
def _cmd_show_find_table_by_name(name):
    """Return the _CmdShowTable for table *name*, or None if not shown."""
    return next((show for show in VSCtl._CMD_SHOW_TABLES
                 if show.table == name), None)
@staticmethod
def _cmd_show_row(ctx, row, level):
    """Render *row* (recursing into referenced rows) as indented text.

    NOTE(review): when the recursion guard trips, or the table is not a
    show table, this returns None instead of the partial output; the
    recursive calls' return values are also discarded -- confirm that
    only the top-level call's output is meant to be used.
    """
    _INDENT_SIZE = 4  # number of spaces per indent level
    show = VSCtl._cmd_show_find_table_by_row(row)
    output = ''
    output += ' ' * level * _INDENT_SIZE
    if show and show.name_column:
        # Label the row with its table name and name column value.
        output += '%s ' % show.table
        datum = getattr(row, show.name_column)
        output += datum
    else:
        output += str(row.uuid)
    output += '\n'
    if not show or show.recurse:
        return
    # 'recurse' doubles as a visited flag to break reference cycles.
    show.recurse = True
    for column in show.columns:
        datum = row._data[column]
        key = datum.type.key
        if (key.type == ovs.db.types.UuidType and key.ref_table_name):
            # Reference column: recurse into the referenced rows
            # instead of printing raw uuids.
            ref_show = VSCtl._cmd_show_find_table_by_name(
                key.ref_table_name)
            if ref_show:
                for atom in datum.values:
                    ref_row = ctx.idl.tables[ref_show.table].rows.get(
                        atom.value)
                    if ref_row:
                        VSCtl._cmd_show_row(ctx, ref_row, level + 1)
                continue
        if not datum.is_default():
            output += ' ' * (level + 1) * _INDENT_SIZE
            output += '%s: %s\n' % (column, datum)
    show.recurse = False
    return output
def _cmd_show(self, ctx, command):
    """'show': render the topology rooted at the Open_vSwitch table.

    NOTE(review): command.result is overwritten on each iteration, so
    only the last root row's rendering survives.  Normally there is a
    single Open_vSwitch row, but confirm before relying on this.
    """
    for row in ctx.idl.tables[
            self._CMD_SHOW_TABLES[0].table].rows.values():
        output = self._cmd_show_row(ctx, row, 0)
        command.result = output
def _pre_get_info(self, _ctx, _command):
    """Register the tables/columns needed to build the topology cache."""
    wanted = (
        (vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
         [vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES]),
        (vswitch_idl.OVSREC_TABLE_BRIDGE,
         [vswitch_idl.OVSREC_BRIDGE_COL_NAME,
          vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
          vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
          vswitch_idl.OVSREC_BRIDGE_COL_PORTS]),
        (vswitch_idl.OVSREC_TABLE_PORT,
         [vswitch_idl.OVSREC_PORT_COL_NAME,
          vswitch_idl.OVSREC_PORT_COL_FAKE_BRIDGE,
          vswitch_idl.OVSREC_PORT_COL_TAG,
          vswitch_idl.OVSREC_PORT_COL_INTERFACES,
          vswitch_idl.OVSREC_PORT_COL_QOS]),
        (vswitch_idl.OVSREC_TABLE_INTERFACE,
         [vswitch_idl.OVSREC_INTERFACE_COL_NAME]),
        (vswitch_idl.OVSREC_TABLE_QOS,
         [vswitch_idl.OVSREC_QOS_COL_QUEUES]),
        # Queue rows are referenced but no columns are needed.
        (vswitch_idl.OVSREC_TABLE_QUEUE, []),
    )
    for table, columns in wanted:
        self.schema_helper.register_columns(table, columns)
def _cmd_list_br(self, ctx, command):
    """'list-br': sorted names of all bridges (real and fake)."""
    ctx.populate_cache()
    command.result = sorted(ctx.bridges)
def _pre_add_br(self, ctx, command):
    """Prerequisites for 'add-br': topology plus Interface.type."""
    self._pre_get_info(ctx, command)
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_INTERFACE,
        [vswitch_idl.OVSREC_INTERFACE_COL_TYPE])
def _cmd_add_br(self, ctx, command):
    """'add-br <bridge>' or 'add-br <bridge> <parent> <vlan>'."""
    br_name = command.args[0]
    parent_name = None
    vlan = 0
    if len(command.args) == 3:
        parent_name = command.args[1]
        vlan = int(command.args[2])
        if vlan < 0 or vlan > 4095:
            # BUG FIX: message previously read '... 0 and 4095 %d',
            # gluing the value onto the sentence without context.
            vsctl_fatal('vlan must be between 0 and 4095, but was %d' %
                        vlan)
    elif len(command.args) != 1:
        # BUG FIX: grammar ('argument' -> 'arguments').
        vsctl_fatal('this command takes exactly 1 or 3 arguments')
    ctx.add_bridge(br_name, parent_name, vlan)
def _del_br(self, ctx, br_name, must_exist=False):
    """Delete bridge *br_name*; a missing bridge aborts only if must_exist."""
    ctx.populate_cache()
    vsctl_bridge = ctx.find_bridge(br_name, must_exist)
    if vsctl_bridge:
        ctx.del_bridge(vsctl_bridge)
def _cmd_del_br(self, ctx, command):
    """'del-br <bridge>'."""
    self._del_br(ctx, command.args[0])
def _list_ports(self, ctx, br_name):
    """Port names on *br_name*, excluding the bridge's own internal port."""
    ctx.populate_cache()
    br = ctx.find_bridge(br_name, True)
    # Fake bridges keep their ports on the real parent bridge row.
    ovsrec_bridge = br.br_cfg if br.br_cfg else br.parent.br_cfg
    ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
    return [port.port_cfg.name for port in br.ports
            if port.port_cfg.name != br.name]
def _cmd_list_ports(self, ctx, command):
    """'list-ports <bridge>': sorted port names."""
    command.result = sorted(self._list_ports(ctx, command.args[0]))
def _pre_add_port(self, _ctx, columns):
    """Register the Port columns 'add-port' writes, plus extra *columns*."""
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_PORT,
        [vswitch_idl.OVSREC_PORT_COL_NAME,
         vswitch_idl.OVSREC_PORT_COL_BOND_FAKE_IFACE])
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_PORT, columns)
def _pre_cmd_add_port(self, ctx, command):
    """Prerequisites for 'add-port': topology plus any columns being set."""
    self._pre_get_info(ctx, command)
    port_schema = self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT]
    columns = [ctx.parse_column_key_value(port_schema, setting)[0]
               for setting in command.args[2:]]
    self._pre_add_port(ctx, columns)
def _cmd_add_port(self, ctx, command):
    """'add-port <bridge> <port> [column[:key]=value]...'."""
    may_exist = command.has_option('--may_exist')
    br_name = command.args[0]
    port_name = command.args[1]
    port_schema = self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT]
    settings = [ctx.parse_column_key_value(port_schema, setting)
                for setting in command.args[2:]]
    # A plain (non-bond) port has one interface named after the port.
    ctx.add_port(br_name, port_name, may_exist,
                 False, [port_name], settings)
def _del_port(self, ctx, br_name=None, target=None,
              must_exist=False, with_iface=False):
    """Delete the port named *target* (or owning the iface *target*).

    :param br_name: when given, verify the port belongs to this bridge.
    :param must_exist: abort via vsctl_fatal() when nothing matches.
    :param with_iface: also match *target* against interface names.
    """
    assert target is not None
    ctx.populate_cache()
    if not with_iface:
        vsctl_port = ctx.find_port(target, must_exist)
    else:
        vsctl_port = ctx.find_port(target, False)
        if not vsctl_port:
            # Fall back to matching an interface and taking its port.
            vsctl_iface = ctx.find_iface(target, False)
            if vsctl_iface:
                vsctl_port = vsctl_iface.port()
        if must_exist and not vsctl_port:
            vsctl_fatal('no port or interface named %s' % target)
    if not vsctl_port:
        return
    # BUG FIX: this guard was inverted ('if not br_name'), so the
    # bridge-membership check ran with br_name=None (making
    # find_bridge(None, True) misfire) and never ran when a bridge
    # name was actually supplied.
    if br_name:
        vsctl_bridge = ctx.find_bridge(br_name, True)
        if vsctl_port.bridge() != vsctl_bridge:
            if vsctl_port.bridge().parent == vsctl_bridge:
                vsctl_fatal('bridge %s does not have a port %s (although '
                            'its parent bridge %s does)' %
                            (br_name, target, vsctl_bridge.parent.name))
            else:
                vsctl_fatal('bridge %s does not have a port %s' %
                            (br_name, target))
    ctx.del_port(vsctl_port)
def _cmd_del_port(self, ctx, command):
    """'del-port [<bridge>] <port>' with --must-exist / --with-iface."""
    br_name = command.args[0] if len(command.args) == 2 else None
    self._del_port(ctx, br_name, command.args[-1],
                   command.has_option('--must-exist'),
                   command.has_option('--with-iface'))
def _list_ifaces(self, ctx, br_name):
    """Interface names on *br_name*, excluding the internal bridge iface."""
    ctx.populate_cache()
    br = ctx.find_bridge(br_name, True)
    ctx.verify_ports()
    return {vsctl_iface.iface_cfg.name
            for vsctl_port in br.ports
            for vsctl_iface in vsctl_port.ifaces
            if vsctl_iface.iface_cfg.name != br_name}
def _cmd_list_ifaces(self, ctx, command):
    """'list-ifaces <bridge>': sorted interface names."""
    command.result = sorted(self._list_ifaces(ctx, command.args[0]))
def _pre_cmd_list_ifaces_verbose(self, ctx, command):
    """Prerequisites for 'list-ifaces-verbose'."""
    self._pre_get_info(ctx, command)
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_BRIDGE,
        [vswitch_idl.OVSREC_BRIDGE_COL_DATAPATH_ID])
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_INTERFACE,
        [vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
         vswitch_idl.OVSREC_INTERFACE_COL_NAME,
         vswitch_idl.OVSREC_INTERFACE_COL_EXTERNAL_IDS,
         vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS,
         vswitch_idl.OVSREC_INTERFACE_COL_OFPORT])
@staticmethod
def _iface_to_dict(iface_cfg):
    """Project an Interface row onto a plain dict for command results."""
    keys = ('name', 'ofport', 'type', 'external_ids', 'options')
    attr = {key: getattr(iface_cfg, key) for key in keys}
    ofport = attr['ofport']
    if ofport:
        # ofport is an optional column, i.e. a 0/1-element list.
        attr['ofport'] = ofport[0]
    return attr
def _list_ifaces_verbose(self, ctx, datapath_id, port_name):
    """Dicts describing interfaces on the bridge with *datapath_id*.

    When *port_name* is given, only interfaces with that exact name are
    returned: ofport may still be unset on a freshly created port, so
    callers fall back to matching by name.
    """
    ctx.populate_cache()
    br = ctx.find_bridge_by_id(datapath_id, True)
    ctx.verify_ports()

    def _wanted(vsctl_iface):
        return (port_name is None or
                vsctl_iface.iface_cfg.name == port_name)

    return [self._iface_to_dict(vsctl_iface.iface_cfg)
            for vsctl_port in br.ports
            for vsctl_iface in vsctl_port.ifaces
            if _wanted(vsctl_iface)]
def _cmd_list_ifaces_verbose(self, ctx, command):
    """'list-ifaces-verbose <datapath_id> [<port_name>]'."""
    datapath_id = command.args[0]
    port_name = command.args[1] if len(command.args) >= 2 else None
    LOG.debug('command.args %s', command.args)
    command.result = sorted(
        self._list_ifaces_verbose(ctx, datapath_id, port_name))
def _verify_controllers(self, ovsrec_bridge):
    """verify() the controller refs so the txn detects concurrent changes."""
    ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)
    for ovsrec_controller in ovsrec_bridge.controller:
        ovsrec_controller.verify(vswitch_idl.OVSREC_CONTROLLER_COL_TARGET)
def _pre_controller(self, ctx, command):
    """Prerequisites shared by the *-controller commands."""
    self._pre_get_info(ctx, command)
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_CONTROLLER,
        [vswitch_idl.OVSREC_CONTROLLER_COL_TARGET])
def _get_controller(self, ctx, br_name):
    """Set of controller targets configured on bridge *br_name*."""
    ctx.populate_cache()
    br = ctx.find_bridge(br_name, True)
    self._verify_controllers(br.br_cfg)
    return {controller.target for controller in br.br_cfg.controller}
def _cmd_get_controller(self, ctx, command):
    """'get-controller <bridge>': sorted controller targets."""
    command.result = sorted(self._get_controller(ctx, command.args[0]))
def _delete_controllers(self, ovsrec_controllers):
    """Delete every Controller row in *ovsrec_controllers*."""
    for ovsrec_controller in ovsrec_controllers:
        ovsrec_controller.delete()
def _del_controller(self, ctx, br_name):
    """Remove every controller from the real bridge *br_name*."""
    ctx.populate_cache()
    ovsrec_bridge = ctx.find_real_bridge(br_name, True).br_cfg
    self._verify_controllers(ovsrec_bridge)
    if ovsrec_bridge.controller:
        self._delete_controllers(ovsrec_bridge.controller)
        ovsrec_bridge.controller = []
def _cmd_del_controller(self, ctx, command):
    """'del-controller <bridge>'."""
    self._del_controller(ctx, command.args[0])
def _insert_controllers(self, controller_names):
    """Insert one Controller row per target name; return the new rows."""
    table = self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_CONTROLLER]
    ovsrec_controllers = []
    for name in controller_names:
        # TODO: check if the name startswith() supported protocols
        ovsrec_controller = self.txn.insert(table)
        ovsrec_controller.target = name
        ovsrec_controllers.append(ovsrec_controller)
    return ovsrec_controllers
def _insert_qos(self):
    """Insert and return a fresh QoS row in the current transaction."""
    return self.txn.insert(
        self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
def _set_controller(self, ctx, br_name, controller_names):
    """Replace *br_name*'s controllers with *controller_names*."""
    ctx.populate_cache()
    ovsrec_bridge = ctx.find_real_bridge(br_name, True).br_cfg
    self._verify_controllers(ovsrec_bridge)
    # Drop the old rows before linking in the replacements.
    self._delete_controllers(ovsrec_bridge.controller)
    ovsrec_bridge.controller = self._insert_controllers(controller_names)
def _cmd_set_controller(self, ctx, command):
    """'set-controller <bridge> <target>...'."""
    self._set_controller(ctx, command.args[0], command.args[1:])
def _del_qos(self, ctx, port_name):
    """Remove the QoS configuration from port *port_name*."""
    assert port_name is not None
    ctx.populate_cache()
    vsctl_port = ctx.find_port(port_name, True)
    ctx.del_qos(vsctl_port.qos)
def _cmd_del_qos(self, ctx, command):
    """'del-qos <port>'."""
    self._del_qos(ctx, command.args[0])
def _set_qos(self, ctx, port_name, type, max_rate):
    """Attach a QoS row of *type* (optional max-rate) to *port_name*.

    NOTE: the parameter name 'type' shadows the builtin; it is kept
    for keyword-argument compatibility with existing callers.
    """
    ctx.populate_cache()
    vsctl_port = ctx.find_port(port_name, True)
    return ctx.set_qos(vsctl_port, type, max_rate)
def _cmd_set_qos(self, ctx, command):
    """'set-qos <port> <type> <max-rate>'."""
    port_name = command.args[0]
    qos_type = command.args[1]
    max_rate = command.args[2]
    ovsrec_qos = self._set_qos(ctx, port_name, qos_type, max_rate)
    command.result = [ovsrec_qos]
def _pre_cmd_set_qos(self, ctx, command):
    """Prerequisites for 'set-qos'."""
    self._pre_get_info(ctx, command)
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_QOS,
        [vswitch_idl.OVSREC_QOS_COL_EXTERNAL_IDS,
         vswitch_idl.OVSREC_QOS_COL_OTHER_CONFIG,
         vswitch_idl.OVSREC_QOS_COL_QUEUES,
         vswitch_idl.OVSREC_QOS_COL_TYPE])
def _cmd_set_queue(self, ctx, command):
    """'set-queue <port> <queues>' where queues is a list of dicts
    with optional 'max-rate'/'min-rate' entries; queue ids are assigned
    by position."""
    ctx.populate_cache()
    port_name = command.args[0]
    queues = command.args[1]
    vsctl_qos = ctx.find_port(port_name, True).qos
    results = []
    for queue_id, queue in enumerate(queues):
        ovsrec_queue = ctx.set_queue(
            vsctl_qos, queue.get('max-rate', None),
            queue.get('min-rate', None), queue_id)
        results.append(ovsrec_queue)
    command.result = results
def _pre_cmd_set_queue(self, ctx, command):
    """Prerequisites for 'set-queue'."""
    self._pre_get_info(ctx, command)
    self.schema_helper.register_columns(
        vswitch_idl.OVSREC_TABLE_QUEUE,
        [vswitch_idl.OVSREC_QUEUE_COL_DSCP,
         vswitch_idl.OVSREC_QUEUE_COL_EXTERNAL_IDS,
         vswitch_idl.OVSREC_QUEUE_COL_OTHER_CONFIG])
# Row-id resolution rules for the database commands ('get', 'set', ...):
# for each table, how a user-supplied record id is matched (name_column)
# and, when uuid_column is set, which reference to follow to reach the
# actual row.
_TABLES = [
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
                             vswitch_idl.OVSREC_BRIDGE_COL_NAME,
                             None)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
                             vswitch_idl.OVSREC_BRIDGE_COL_NAME,
                             vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_INTERFACE,
                             vswitch_idl.OVSREC_INTERFACE_COL_NAME,
                             None)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_MIRROR,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MIRROR,
                             vswitch_idl.OVSREC_MIRROR_COL_NAME,
                             None)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_MANAGER,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MANAGER,
                             vswitch_idl.OVSREC_MANAGER_COL_TARGET,
                             None)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_NETFLOW,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
                             vswitch_idl.OVSREC_BRIDGE_COL_NAME,
                             vswitch_idl.OVSREC_BRIDGE_COL_NETFLOW)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
                             None,
                             None)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_PORT,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
                             vswitch_idl.OVSREC_PORT_COL_NAME,
                             None)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_QOS,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
                             vswitch_idl.OVSREC_PORT_COL_NAME,
                             vswitch_idl.OVSREC_PORT_COL_QOS)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_QUEUE,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_QOS,
                             None,
                             vswitch_idl.OVSREC_QOS_COL_QUEUES)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_SSL,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
                             None,
                             vswitch_idl.OVSREC_OPEN_VSWITCH_COL_SSL)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_SFLOW,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
                             vswitch_idl.OVSREC_BRIDGE_COL_NAME,
                             vswitch_idl.OVSREC_BRIDGE_COL_SFLOW)]),
    _VSCtlTable(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
                [_VSCtlRowID(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
                             vswitch_idl.OVSREC_FLOW_TABLE_COL_NAME,
                             None)]),
]
@staticmethod
def _score_partial_match(name, s):
    """Score how well user input *s* matches *name*; 0 means no match.

    Exact match scores highest; an input that extends the name is next;
    a proper prefix scores by its length (longer prefixes win).
    Comparison is case-insensitive and treats '-' and '_' alike.
    """
    _MAX_SCORE = 0xffffffff
    assert len(name) < _MAX_SCORE
    s = s[:_MAX_SCORE - 1]  # in practice, this doesn't matter
    if name == s:
        return _MAX_SCORE

    def canon(text):
        return text.lower().replace('-', '_')

    name = canon(name)
    s = canon(s)
    if s.startswith(name):
        return _MAX_SCORE - 1
    if name.startswith(s):
        return len(s)
    return 0
@staticmethod
def _get_table(table_name):
    """Resolve a (possibly abbreviated) table name to a _VSCtlTable.

    Aborts via vsctl_fatal() when the name is ambiguous or unknown.
    """
    best_match = None
    best_score = 0
    for table in VSCtl._TABLES:
        score = VSCtl._score_partial_match(table.table_name, table_name)
        if score > best_score:
            best_match, best_score = table, score
        elif score == best_score:
            # A tie at the current best score means ambiguity.
            best_match = None
    if best_match:
        return best_match
    if best_score:
        vsctl_fatal('multiple table names match "%s"' % table_name)
    vsctl_fatal('unknown table "%s"' % table_name)
def _pre_get_table(self, _ctx, table_name):
    """Register *table_name* plus every table/column its row-id rules use."""
    vsctl_table = self._get_table(table_name)
    schema_helper = self.schema_helper
    schema_helper.register_table(vsctl_table.table_name)
    for row_id in vsctl_table.row_ids:
        if not row_id.table:
            continue
        schema_helper.register_table(row_id.table)
        for column in (row_id.name_column, row_id.uuid_column):
            if column:
                schema_helper.register_columns(row_id.table, [column])
    return vsctl_table
def _get_column(self, table_name, column_name):
    """Resolve a (possibly abbreviated) column name within *table_name*.

    Aborts via vsctl_fatal() when nothing matches or the match is ambiguous.
    """
    scored = [(VSCtl._score_partial_match(candidate, column_name), candidate)
              for candidate in self.schema.tables[table_name].columns.keys()]
    best_score = max([0] + [score for score, _ in scored])
    if not best_score:
        vsctl_fatal('%s does not contain a column whose name matches '
                    '"%s"' % (table_name, column_name))
    winners = [col for score, col in scored if score == best_score]
    if len(winners) > 1:
        vsctl_fatal('%s contains more than one column whose name '
                    'matches "%s"' % (table_name, column_name))
    # ovs.db.schema_helper._keep_table_columns() requires that
    # column_name is type of str. Not unicode string
    return str(winners[0])
def _pre_get_column(self, _ctx, table_name, column):
    """Resolve *column* and register it with the schema helper."""
    resolved = self._get_column(table_name, column)
    self.schema_helper.register_columns(table_name, [resolved])
def _pre_get(self, ctx, table_name, columns):
    """Pre-register the table and each requested column."""
    vsctl_table = self._pre_get_table(ctx, table_name)
    real_name = vsctl_table.table_name
    for column in columns:
        self._pre_get_column(ctx, real_name, column)
def _pre_cmd_get(self, ctx, command):
    """Schema pre-registration for the 'get' command."""
    table_name = command.args[0]
    table_schema = self.schema.tables[table_name]
    # args[2:] are <column>[:<key>] specs; only the column part matters here.
    columns = [ctx.parse_column_key_value(table_schema, spec)[0]
               for spec in command.args[2:]]
    self._pre_get(ctx, table_name, columns)
def _get(self, ctx, table_name, record_id, column_keys,
         id_=None, if_exists=False):
    """Read the requested columns (optionally one map key each) from the
    row addressed by *record_id* in *table_name*.

    :type column_keys: list of (column, key_string)
                       where column and key are str
    """
    vsctl_table = self._get_table(table_name)
    row = ctx.must_get_row(vsctl_table, record_id)
    # NOTE(review): `if_exists` is accepted for CLI parity but is never
    # consulted here -- a missing row always aborts inside must_get_row.
    if id_:
        raise NotImplementedError()  # TODO:XXX
        # The code below is unreachable until --id support is implemented.
        symbol, new = ctx.create_symbol(id_)
        if not new:
            vsctl_fatal('row id "%s" specified on "get" command was used '
                        'before it was defined' % id_)
        symbol.uuid = row.uuid
        symbol.strong_ref = True
    values = []
    for column, key_string in column_keys:
        row.verify(column)
        datum = getattr(row, column)
        if key_string:
            # A key is only meaningful for map-typed columns.
            if type(datum) != dict:
                vsctl_fatal('cannot specify key to get for non-map column '
                            '%s' % column)
            values.append(datum[key_string])
        else:
            values.append(datum)
    return values
def _cmd_get(self, ctx, command):
    """Implementation of 'get'; the fetched values land in command.result."""
    id_ = None  # TODO:XXX --id
    if_exists = command.has_option('--if-exists')
    table_name, record_id = command.args[:2]
    table_schema = self.schema.tables[table_name]
    column_keys = [ctx.parse_column_key_value(table_schema, spec)[:2]
                   for spec in command.args[2:]]
    command.result = self._get(ctx, table_name, record_id, column_keys,
                               id_, if_exists)
def _pre_cmd_find(self, ctx, command):
    """Schema pre-registration for the 'find' command."""
    table_name = command.args[0]
    table_schema = self.schema.tables[table_name]
    columns = [ctx.parse_column_key_value(table_schema, spec)[0]
               for spec in command.args[1:]]
    LOG.debug('columns %s', columns)
    self._pre_get(ctx, table_name, columns)
def _check_value(self, ovsrec_row, column_key_value):
    """Return True when *ovsrec_row* satisfies one parsed
    <column>[:<key>]=<value> condition of the 'find' command.

    Bug fix: the keyed (map) comparison used to be inverted (`!=` returned
    True), which made `find table col:key=value` select exactly the rows
    whose value DIFFERED; it also raised KeyError for rows lacking the key.
    A row now matches when the key is present and maps to the value.
    """
    column, key, value_json = column_key_value
    column_schema = ovsrec_row._table.columns[column]
    value = ovs.db.data.Datum.from_json(
        column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row)
    datum = getattr(ovsrec_row, column)
    if key is None:
        return datum == value
    return key in datum and datum[key] == value
def _find(self, ctx, table_name, column_key_values):
    """Return every row of *table_name* that satisfies all conditions."""
    matches = []
    for ovsrec_row in ctx.idl.tables[table_name].rows.values():
        LOG.debug('ovsrec_row %s', ovsrec_row_to_string(ovsrec_row))
        satisfied = all(self._check_value(ovsrec_row, condition)
                        for condition in column_key_values)
        if satisfied:
            matches.append(ovsrec_row)
    return matches
def _cmd_find(self, ctx, command):
    """Implementation of 'find'; matching rows land in command.result."""
    table_name = command.args[0]
    table_schema = self.schema.tables[table_name]
    conditions = [ctx.parse_column_key_value(table_schema, spec)
                  for spec in command.args[1:]]
    command.result = self._find(ctx, table_name, conditions)
def _check_mutable(self, table_name, column):
    """Abort via vsctl_fatal() when *column* is read-only in the schema."""
    if not self.schema.tables[table_name].columns[column].mutable:
        vsctl_fatal('cannot modify read-only column %s in table %s' %
                    (column, table_name))
def _pre_set(self, ctx, table_name, columns):
    """Register the table and each target column, rejecting read-only ones."""
    self._pre_get_table(ctx, table_name)
    for column in columns:
        self._pre_get_column(ctx, table_name, column)
        self._check_mutable(table_name, column)
def _pre_cmd_set(self, ctx, command):
    """Schema pre-registration for the 'set' command."""
    table_name = command.args[0]
    table_schema = self.schema.tables[table_name]
    columns = [ctx.parse_column_key_value(table_schema, spec)[0]
               for spec in command.args[2:]]
    self._pre_set(ctx, table_name, columns)
def _set(self, ctx, table_name, record_id, column_key_values):
    """Apply each parsed update to the addressed row.

    :type column_key_values: list of (column, key_string, value_json)
    """
    vsctl_table = self._get_table(table_name)
    ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
    for update in column_key_values:
        ctx.set_column(ovsrec_row, *update)
    ctx.invalidate_cache()
def _cmd_set(self, ctx, command):
    """Implementation of the 'set' command."""
    table_name, record_id = command.args[:2]
    # Remaining args each have the form <column>[:<key>]=<value>.
    table_schema = self.schema.tables[table_name]
    updates = [ctx.parse_column_key_value(table_schema, spec)
               for spec in command.args[2:]]
    self._set(ctx, table_name, record_id, updates)
def _pre_clear(self, ctx, table_name, column):
    """Register table/column for 'clear' and reject read-only columns."""
    self._pre_get_table(ctx, table_name)
    self._pre_get_column(ctx, table_name, column)
    self._check_mutable(table_name, column)
def _pre_cmd_clear(self, ctx, command):
    """Schema pre-registration for 'clear' (args[1] is the record id and
    needs no schema work)."""
    self._pre_clear(ctx, command.args[0], command.args[2])
def _clear(self, ctx, table_name, record_id, column):
    """Reset *column* of the addressed row to its default (empty) value."""
    vsctl_table = self._get_table(table_name)
    ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
    column_schema = ctx.idl.tables[table_name].columns[column]
    # A column whose schema demands at least one element cannot be emptied.
    if column_schema.type.n_min > 0:
        vsctl_fatal('"clear" operation cannot be applied to column %s '
                    'of table %s, which is not allowed to be empty' %
                    (column, table_name))
    # assuming that default datum is empty.
    default_datum = ovs.db.data.Datum.default(column_schema.type)
    setattr(ovsrec_row, column,
            default_datum.to_python(ovs.db.idl._uuid_to_row))
    ctx.invalidate_cache()
def _cmd_clear(self, ctx, command):
    """Implementation of the 'clear' command."""
    table_name, record_id, column = command.args[:3]
    self._clear(ctx, table_name, record_id, column)
#
# Create constants from ovs db schema
#
def schema_print(schema_location, prefix):
    """Print Python constants (DB, table and column names) generated from
    the OVS schema file at *schema_location*, each prefixed with *prefix*.

    Fix: the local holding the parsed JSON used to be named `json`,
    shadowing the commonly imported json module; renamed for clarity.
    The sort key is also hoisted out of the loops.
    """
    prefix = prefix.upper()
    schema_json = ovs.json.from_file(schema_location)
    schema = ovs.db.schema.DbSchema.from_json(schema_json)
    by_name = operator.attrgetter('name')
    print('# Do NOT edit.')
    print('# This is automatically generated.')
    print('# created based on version %s' % (schema.version or 'unknown'))
    print('')
    print('')
    print('%s_DB_NAME = \'%s\'' % (prefix, schema.name))
    for table in sorted(schema.tables.values(), key=by_name):
        print('')
        print('%s_TABLE_%s = \'%s\'' % (prefix,
                                        table.name.upper(), table.name))
        for column in sorted(table.columns.values(), key=by_name):
            print('%s_%s_COL_%s = \'%s\'' % (prefix, table.name.upper(),
                                             column.name.upper(),
                                             column.name))
def main():
    """CLI entry point: validate argv and emit the schema constants.

    Bug fix: a missing-argument invocation previously printed the usage
    message but then fell through to sys.argv[1]/sys.argv[2] and crashed
    with IndexError; it now exits with status 1 instead.
    """
    if len(sys.argv) <= 2:
        print('Usage: %s <schema file> <prefix>' % sys.argv[0])
        sys.exit(1)
    location = sys.argv[1]
    prefix = sys.argv[2]
    schema_print(location, prefix)


if __name__ == '__main__':
    main()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for make_template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import tensorflow as tf
from tensorflow.python.ops import template
def var_scoped_function():
    """Create (or reuse) a length-1 variable 'dummy' in the current scope."""
    return tf.get_variable(
        "dummy", shape=[1], initializer=tf.zeros_initializer)
def internally_var_scoped_function(scope_name):
    """Like var_scoped_function, but opens *scope_name* itself first."""
    with tf.variable_scope(scope_name):
        return tf.get_variable(
            "dummy", shape=[1], initializer=tf.zeros_initializer)
def function_with_create(trainable):
    """Creates a variable as a side effect using tf.Variable."""
    tf.Variable(0, trainable=trainable)  # deliberate side effect
    return tf.get_variable(
        "dummy", shape=[1], initializer=tf.zeros_initializer)
class TemplateTest(tf.test.TestCase):
  """Unit tests for template.make_template variable-sharing semantics."""

  def test_end_to_end(self):
    """This test shows a very simple line model with test_loss.

    The template is used to share parameters between a training and test model.
    """
    # y = 2x + 1
    training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
    test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])

    tf.set_random_seed(1234)

    def test_line(x):
      m = tf.get_variable("w", shape=[],
                          initializer=tf.truncated_normal_initializer())
      b = tf.get_variable("b", shape=[],
                          initializer=tf.truncated_normal_initializer())
      return x * m + b

    line_template = template.make_template("line", test_line)

    train_prediction = line_template(training_input)
    test_prediction = line_template(test_input)

    train_loss = tf.reduce_mean(tf.square(train_prediction - training_output))
    test_loss = tf.reduce_mean(tf.square(test_prediction - test_output))

    optimizer = tf.train.GradientDescentOptimizer(0.1)
    train_op = optimizer.minimize(train_loss)

    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      initial_test_loss = sess.run(test_loss)
      sess.run(train_op)
      final_test_loss = sess.run(test_loss)

    # Parameters are tied, so the loss should have gone down when we trained it.
    self.assertLess(final_test_loss, initial_test_loss)

  def test_skip_stack_frames(self):
    # The helper should drop the frames shared by both tracebacks.
    first = traceback.format_stack()
    second = traceback.format_stack()
    result = template._skip_common_stack_elements(first, second)
    self.assertEqual(1, len(result))
    self.assertNotEqual(len(first), len(result))

  def test_template_with_name(self):
    # Two templates from the same name get distinct uniquified scopes.
    tmpl1 = template.make_template("s1", var_scoped_function)
    tmpl2 = template.make_template("s1", var_scoped_function)

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/dummy:0", v1.name)
    self.assertEqual("s1_2/dummy:0", v3.name)

  def test_template_in_scope(self):
    tmpl1 = template.make_template("s1", var_scoped_function)
    tmpl2 = template.make_template("s1", var_scoped_function)

    with tf.variable_scope("scope"):
      v1 = tmpl1()
      v3 = tmpl2()

    # The template contract requires the following to ignore scope2.
    with tf.variable_scope("scope2"):
      v2 = tmpl1()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("scope/s1/dummy:0", v1.name)
    self.assertEqual("scope/s1_1/dummy:0", v3.name)

  def test_template_with_internal_reuse(self):
    tmpl1 = template.make_template("s1", internally_var_scoped_function)
    tmpl2 = template.make_template("s1", internally_var_scoped_function)

    v1 = tmpl1("test")
    v2 = tmpl1("test")
    v3 = tmpl2("test")
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/test/dummy:0", v1.name)
    self.assertEqual("s1_2/test/dummy:0", v3.name)

    # Calling with a different inner scope must fail: the variable set differs.
    with self.assertRaises(ValueError):
      tmpl1("not_test")

  def test_template_without_name(self):
    with self.assertRaises(ValueError):
      template.make_template(None, var_scoped_function)

  def test_make_template(self):
    # Test both that we can call it with positional and keywords.
    tmpl1 = template.make_template(
        "s1", internally_var_scoped_function, scope_name="test")
    tmpl2 = template.make_template(
        "s1", internally_var_scoped_function, scope_name="test")

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/test/dummy:0", v1.name)
    self.assertEqual("s1_2/test/dummy:0", v3.name)

  def test_enforces_no_extra_trainable_variables(self):
    # Second call creating a new trainable variable must be rejected.
    tmpl = template.make_template("s", function_with_create, trainable=True)

    tmpl()
    with self.assertRaises(ValueError):
      tmpl()

  def test_permits_extra_non_trainable_variables(self):
    tmpl = template.make_template("s", function_with_create, trainable=False)
    self.assertEqual(tmpl(), tmpl())

  def test_internal_variable_reuse(self):
    def nested():
      with tf.variable_scope("nested") as vs:
        v1 = tf.get_variable("x", initializer=tf.zeros_initializer, shape=[])
      with tf.variable_scope(vs, reuse=True):
        v2 = tf.get_variable("x")
      self.assertEqual(v1, v2)
      return v1

    tmpl1 = template.make_template("s1", nested)
    tmpl2 = template.make_template("s1", nested)

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/nested/x:0", v1.name)
    self.assertEqual("s1_2/nested/x:0", v3.name)

  def test_nested_templates(self):
    def nested_template():
      nested1 = template.make_template("nested", var_scoped_function)
      nested2 = template.make_template("nested", var_scoped_function)
      v1 = nested1()
      v2 = nested2()
      self.assertNotEqual(v1, v2)
      return v2

    tmpl1 = template.make_template("s1", nested_template)
    tmpl2 = template.make_template("s1", nested_template)

    v1 = tmpl1()
    v2 = tmpl1()
    v3 = tmpl2()
    self.assertEqual(v1, v2)
    self.assertNotEqual(v1, v3)
    self.assertEqual("s1/nested_1/dummy:0", v1.name)
    self.assertEqual("s1_2/nested_1/dummy:0", v3.name)

  def test_immediate_scope_creation(self):
    # Create templates in scope a then call in scope b. make_template should
    # capture the scope the first time it is called, and make_immediate_template
    # should capture the scope at construction time.
    with tf.variable_scope("ctor_scope"):
      tmpl_immed = template.make_template(
          "a", var_scoped_function, True)  # create scope here
      tmpl_defer = template.make_template(
          "b", var_scoped_function, False)  # default: create scope at __call__
    with tf.variable_scope("call_scope"):
      inner_imm_var = tmpl_immed()
      inner_defer_var = tmpl_defer()
    outer_imm_var = tmpl_immed()
    outer_defer_var = tmpl_defer()

    self.assertNotEqual(inner_imm_var, inner_defer_var)
    self.assertEqual(outer_imm_var, inner_imm_var)
    self.assertEqual(outer_defer_var, inner_defer_var)

    self.assertEqual("ctor_scope/a/dummy:0", inner_imm_var.name)
    self.assertEqual("call_scope/b/dummy:0", inner_defer_var.name)
# Running this file directly executes the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
lmbds = xrange(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.stats
from sympy.mpmath import mp
def _to_np_float64(v):
    """Coerce *v* to np.float64; NaN and +/-inf both map to np.inf."""
    finite = not (math.isnan(v) or math.isinf(v))
    return np.float64(v) if finite else np.inf
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
    """Gaussian density at *x* with standard deviation *sigma*, center *mean*."""
    normal = scipy.stats.norm(loc=mean, scale=sigma)
    return normal.pdf(x)
def cropped_ratio(a, b):
    """Return a / b, but define the ratio as 1 when both operands are
    negligibly small (< 1e-50), avoiding a 0/0 blow-up in the tails."""
    both_negligible = a < 1E-50 and b < 1E-50
    return 1. if both_negligible else a / b
def integral_inf(fn):
    """Integrate *fn* over the whole real line, discarding the error estimate."""
    value, _err = integrate.quad(fn, -np.inf, np.inf)
    return value
def integral_bounded(fn, lb, ub):
    """Integrate *fn* from *lb* to *ub*, discarding the error estimate."""
    value, _err = integrate.quad(fn, lb, ub)
    return value
def distributions(sigma, q):
    """Return (mu0, mu1, mu): the two unit-shifted Gaussians and their
    q-weighted mixture used by the moments accountant."""
    def mu0(y):
        return pdf_gauss(y, sigma=sigma, mean=0.0)

    def mu1(y):
        return pdf_gauss(y, sigma=sigma, mean=1.0)

    def mu(y):
        return (1 - q) * mu0(y) + q * mu1(y)

    return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
    """Closed-form (binomial-expansion) computation of the moment A(lmbd).

    Args:
      sigma: noise scale of the Gaussian mechanism.
      q: sampling probability.
      lmbd: moment order; rounded up to an integer.
      verbose: if True, print the intermediate terms (Python 2 print).
    Returns:
      np.float64 value of A(lmbd); np.inf when the result overflows or is NaN.
    """
    lmbd_int = int(math.ceil(lmbd))
    if lmbd_int == 0:
        return 1.0
    a_lambda_first_term_exact = 0
    a_lambda_second_term_exact = 0
    for i in xrange(lmbd_int + 1):
        # NOTE(review): relies on scipy.special being reachable through the
        # scipy.stats import at module top -- confirm.
        coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
        s1, s2 = 0, 0
        for j in xrange(i + 1):
            coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
            s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
            s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
        a_lambda_first_term_exact += coef_i * s1
        a_lambda_second_term_exact += coef_i * s2
    # Mixture of the two expansions, weighted by the sampling probability.
    a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
                      q * a_lambda_second_term_exact)
    if verbose:
        print "A: by binomial expansion {} = {} + {}".format(
            a_lambda_exact,
            (1.0 - q) * a_lambda_first_term_exact,
            q * a_lambda_second_term_exact)
    return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
    """Numerically integrate the moment B(lmbd) and compute its upper bound.

    Returns np.float64 value of B(lmbd); see compute_a for the parameters.
    """
    mu0, _, mu = distributions(sigma, q)
    b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
    b_lambda = integral_inf(b_lambda_fn)
    # Crossover point used to split the integration domain.
    m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
    b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
                      np.power(mu(-z) / mu0(z), lmbd))
    if verbose:
        print "M =", m
        print "f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m))
    # Sanity check that the integrand has the expected sign at +/-M.
    assert b_fn(-m) < 0 and b_fn(m) < 0
    b_lambda_int1_fn = lambda z: (mu0(z) *
                                  np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
    b_lambda_int2_fn = lambda z: (mu0(z) *
                                  np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
    b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
    b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
    a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
    b_bound = a_lambda_m1 + b_int1 - b_int2
    if verbose:
        print "B: by numerical integration", b_lambda
        print "B must be no more than ", b_bound
    # NOTE(review): this print is unconditional (not gated on verbose) --
    # presumably leftover debug output; confirm before silencing.
    print b_lambda, b_bound
    return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
    """Multi-precision Gaussian density at *x* via mpmath."""
    two = mp.mpf("2.")
    variance_term = two * sigma ** 2
    norm_const = mp.mpf(1.) / mp.sqrt(variance_term * mp.pi)
    return norm_const * mp.exp(-(x - mean) ** 2 / variance_term)
def integral_inf_mp(fn):
    """Multi-precision integral of *fn* over the real line (error discarded)."""
    value, _err = mp.quad(fn, [-mp.inf, mp.inf], error=True)
    return value
def integral_bounded_mp(fn, lb, ub):
    """Multi-precision integral of *fn* from *lb* to *ub* (error discarded)."""
    value, _err = mp.quad(fn, [lb, ub], error=True)
    return value
def distributions_mp(sigma, q):
    """Multi-precision counterpart of distributions(): (mu0, mu1, mixture)."""
    def mu0(y):
        return pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))

    def mu1(y):
        return pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))

    def mu(y):
        return (1 - q) * mu0(y) + q * mu1(y)

    return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
    """Multi-precision (mpmath) computation of A(lmbd) by numerical
    integration; used to verify the closed-form compute_a()."""
    lmbd_int = int(math.ceil(lmbd))
    if lmbd_int == 0:
        return 1.0
    mu0, mu1, mu = distributions_mp(sigma, q)
    a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
    a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
    a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
    a_lambda = integral_inf_mp(a_lambda_fn)
    a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
    a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
    if verbose:
        print "A: by numerical integration {} = {} + {}".format(
            a_lambda,
            (1 - q) * a_lambda_first_term,
            q * a_lambda_second_term)
    return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
    """Multi-precision (mpmath) computation of B(lmbd) by numerical
    integration, with an assertion that B stays under its bound."""
    lmbd_int = int(math.ceil(lmbd))
    if lmbd_int == 0:
        return 1.0
    mu0, _, mu = distributions_mp(sigma, q)
    b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
    b_lambda = integral_inf_mp(b_lambda_fn)
    # Crossover point used to split the integration domain.
    m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
    b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
                      (mu(-z) / mu0(z)) ** lmbd_int)
    if verbose:
        print "M =", m
        print "f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m))
    # Sanity check that the integrand has the expected sign at +/-M.
    assert b_fn(-m) < 0 and b_fn(m) < 0
    b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
    b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
    b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
    b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
    a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
    b_bound = a_lambda_m1 + b_int1 - b_int2
    if verbose:
        print "B by numerical integration", b_lambda
        print "B must be no more than ", b_bound
    # Enforce (up to numerical slack) that B does not exceed its bound.
    assert b_lambda < b_bound + 1e-5
    return _to_np_float64(b_lambda)
def _compute_delta(log_moments, eps):
    """Compute delta for given log_moments and eps.

    Args:
      log_moments: the log moments of privacy loss, as pairs of
        (moment_order, log_moment).
      eps: the target epsilon.
    Returns:
      The smallest delta implied by any usable moment (at most 1.0).
    """
    best = 1.0
    for order, log_moment in log_moments:
        if order == 0:
            continue  # zeroth moment carries no information
        if math.isnan(log_moment) or math.isinf(log_moment):
            sys.stderr.write("The %d-th order is inf or Nan\n" % order)
            continue
        exponent = log_moment - order * eps
        if exponent < 0:
            best = min(best, math.exp(exponent))
    return best
def _compute_eps(log_moments, delta):
    """Compute epsilon for given log_moments and delta.

    Args:
      log_moments: the log moments of privacy loss, as pairs of
        (moment_order, log_moment).
      delta: the target delta.
    Returns:
      The smallest epsilon over all usable moments (inf when none).
    """
    candidates = []
    for order, log_moment in log_moments:
        if order == 0:
            continue  # zeroth moment carries no information
        if math.isnan(log_moment) or math.isinf(log_moment):
            sys.stderr.write("The %d-th order is inf or Nan\n" % order)
            continue
        candidates.append((log_moment - math.log(delta)) / order)
    return min(candidates) if candidates else float("inf")
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
    """Compute the log moment of Gaussian mechanism for given parameters.

    Args:
      q: the sampling ratio.
      sigma: the noise sigma.
      steps: the number of steps.
      lmbd: the moment order.
      verify: if False, only compute the symbolic version. If True, computes
        both symbolic and numerical solutions and verifies the results match.
      verbose: if True, print out debug information.
    Returns:
      the log moment with type np.float64, could be np.inf.
    """
    moment = compute_a(sigma, q, lmbd, verbose=verbose)
    if verify:
        mp.dps = 50  # high precision for the cross-check
        moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
        moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
        np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
        if not np.isinf(moment_a_mp):
            # The following test fails for (1, np.inf)!
            np.testing.assert_array_less(moment_b_mp, moment_a_mp)
    return np.inf if np.isinf(moment) else np.log(moment) * steps
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
    """Compute delta (or eps) for given eps (or delta) from log moments.

    Args:
      log_moments: array of (moment_order, log_moment) pairs.
      target_eps: if not None, the epsilon for which we would like to compute
        corresponding delta value.
      target_delta: if not None, the delta for which we would like to compute
        corresponding epsilon value. Exactly one of target_eps and
        target_delta is None.
    Returns:
      eps, delta pair
    """
    # Exactly one of the two targets must be supplied (xor also rejects
    # the both-None case).
    assert (target_eps is None) ^ (target_delta is None)
    if target_eps is None:
        return (_compute_eps(log_moments, target_delta), target_delta)
    return (target_eps, _compute_delta(log_moments, target_eps))
| |
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Elastic Block Storage Volume
"""
from boto.resultset import ResultSet
from boto.ec2.tag import Tag
from boto.ec2.ec2object import TaggedEC2Object
class Volume(TaggedEC2Object):
    """
    Represents an EBS volume.

    :ivar id: The unique ID of the volume.
    :ivar create_time: The timestamp of when the volume was created.
    :ivar status: The status of the volume.
    :ivar size: The size (in GB) of the volume.
    :ivar snapshot_id: The ID of the snapshot this volume was created
        from, if applicable.
    :ivar attach_data: An AttachmentSet object.
    :ivar zone: The availability zone this volume is in.
    :ivar type: The type of volume (standard or consistent-iops)
    :ivar iops: If this volume is of type consistent-iops, this is
        the number of IOPS provisioned (10-300).
    """

    def __init__(self, connection=None):
        # All fields are populated later by the SAX parsing callbacks.
        super(Volume, self).__init__(connection)
        self.id = None
        self.create_time = None
        self.status = None
        self.size = None
        self.snapshot_id = None
        self.attach_data = None
        self.zone = None
        self.type = None
        self.iops = None

    def __repr__(self):
        return 'Volume:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Give the tag-handling base class first refusal on the element.
        retval = super(Volume, self).startElement(name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'attachmentSet':
            self.attach_data = AttachmentSet()
            return self.attach_data
        elif name == 'tagSet':
            self.tags = ResultSet([('item', Tag)])
            return self.tags
        else:
            return None

    def endElement(self, name, value, connection):
        # Map response XML elements onto attributes; unknown elements are
        # attached verbatim by the final else branch.
        if name == 'volumeId':
            self.id = value
        elif name == 'createTime':
            self.create_time = value
        elif name == 'status':
            # An empty status element is ignored so an earlier value survives.
            if value != '':
                self.status = value
        elif name == 'size':
            self.size = int(value)
        elif name == 'snapshotId':
            self.snapshot_id = value
        elif name == 'availabilityZone':
            self.zone = value
        elif name == 'volumeType':
            self.type = value
        elif name == 'iops':
            self.iops = int(value)
        else:
            setattr(self, name, value)

    def _update(self, updated):
        # Adopt every attribute of the freshly-fetched Volume object.
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False, dry_run=False):
        """
        Update the data associated with this volume by querying EC2.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         volume the update method returns quietly. If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.

        :type dry_run: bool
        :param dry_run: Set to True if the operation should not actually run.
        """
        # Check the resultset since Eucalyptus ignores the volumeId param
        unfiltered_rs = self.connection.get_all_volumes(
            [self.id],
            dry_run=dry_run
        )
        rs = [x for x in unfiltered_rs if x.id == self.id]
        if len(rs) > 0:
            self._update(rs[0])
        elif validate:
            raise ValueError('%s is not a valid Volume ID' % self.id)
        return self.status

    def delete(self, dry_run=False):
        """
        Delete this EBS volume.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.delete_volume(self.id, dry_run=dry_run)

    def attach(self, instance_id, device, dry_run=False):
        """
        Attach this EBS volume to an EC2 instance.

        :type instance_id: str
        :param instance_id: The ID of the EC2 instance to which it will
                            be attached.

        :type device: str
        :param device: The device on the instance through which the
                       volume will be exposed (e.g. /dev/sdh)

        :rtype: bool
        :return: True if successful
        """
        return self.connection.attach_volume(
            self.id,
            instance_id,
            device,
            dry_run=dry_run
        )

    def detach(self, force=False, dry_run=False):
        """
        Detach this EBS volume from an EC2 instance.

        :type force: bool
        :param force: Forces detachment if the previous detachment
            attempt did not occur cleanly. This option can lead to
            data loss or a corrupted file system. Use this option only
            as a last resort to detach a volume from a failed
            instance. The instance will not have an opportunity to
            flush file system caches nor file system meta data. If you
            use this option, you must perform file system check and
            repair procedures.

        :rtype: bool
        :return: True if successful
        """
        # attach_data may be absent if the volume was never attached.
        instance_id = None
        if self.attach_data:
            instance_id = self.attach_data.instance_id
        device = None
        if self.attach_data:
            device = self.attach_data.device
        return self.connection.detach_volume(
            self.id,
            instance_id,
            device,
            force,
            dry_run=dry_run
        )

    def create_snapshot(self, description=None, dry_run=False):
        """
        Create a snapshot of this EBS Volume.

        :type description: str
        :param description: A description of the snapshot.
                            Limited to 256 characters.

        :rtype: :class:`boto.ec2.snapshot.Snapshot`
        :return: The created Snapshot object
        """
        return self.connection.create_snapshot(
            self.id,
            description,
            dry_run=dry_run
        )

    def volume_state(self):
        """
        Returns the state of the volume. Same value as the status attribute.
        """
        return self.status

    def attachment_state(self):
        """
        Get the attachment state.
        """
        state = None
        if self.attach_data:
            state = self.attach_data.status
        return state

    def snapshots(self, owner=None, restorable_by=None, dry_run=False):
        """
        Get all snapshots related to this volume.  Note that this requires
        that all available snapshots for the account be retrieved from EC2
        first and then the list is filtered client-side to contain only
        those for this volume.

        :type owner: str
        :param owner: If present, only the snapshots owned by the
            specified user will be returned.  Valid values are:

            * self
            * amazon
            * AWS Account ID

        :type restorable_by: str
        :param restorable_by: If present, only the snapshots that
            are restorable by the specified account id will be returned.

        :rtype: list of L{boto.ec2.snapshot.Snapshot}
        :return: The requested Snapshot objects
        """
        rs = self.connection.get_all_snapshots(
            owner=owner,
            restorable_by=restorable_by,
            dry_run=dry_run
        )
        # Client-side filter: keep only snapshots taken from this volume.
        mine = []
        for snap in rs:
            if snap.volume_id == self.id:
                mine.append(snap)
        return mine
class AttachmentSet(object):
    """
    Represents an EBS attachmentset.

    :ivar id: The unique ID of the volume.
    :ivar instance_id: The unique ID of the attached instance
    :ivar status: The status of the attachment
    :ivar attach_time: Attached since
    :ivar device: The device the instance has mapped
    """

    # XML element name -> attribute name for endElement dispatch.
    _FIELD_MAP = {
        'volumeId': 'id',
        'instanceId': 'instance_id',
        'status': 'status',
        'attachTime': 'attach_time',
        'device': 'device',
    }

    def __init__(self):
        # All fields are populated later by the SAX parsing callbacks.
        self.id = None
        self.instance_id = None
        self.status = None
        self.attach_time = None
        self.device = None

    def __repr__(self):
        return 'AttachmentSet:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Known elements map onto our attributes; anything unrecognized is
        # attached verbatim under its XML name.
        setattr(self, self._FIELD_MAP.get(name, name), value)
class VolumeAttribute(object):
    """Parses a DescribeVolumeAttribute response into an attrs dict."""

    def __init__(self, parent=None):
        # 'parent' is accepted for API compatibility but unused.
        self.id = None
        self._key_name = None
        self.attrs = {}

    def startElement(self, name, attrs, connection):
        if name == 'autoEnableIO':
            # Remember which attribute the upcoming <value> belongs to.
            self._key_name = name
        return None

    def endElement(self, name, value, connection):
        if name == 'value':
            # Boolean attribute values arrive as 'true'/'false' text.
            self.attrs[self._key_name] = (value.lower() == 'true')
        elif name == 'volumeId':
            self.id = value
        else:
            setattr(self, name, value)
| |
import threading
import collections
import time
import logging
from listener import SiteStream, FOLLOW_LIMIT
from thread import ListenThread
from util import grouper
from parser import DefaultParser
# Module-wide logger for the sitebucket package.
logger = logging.getLogger("sitebucket")
# Consolidate streams once more than this many of them are under capacity
# (checked each pass of the monitor loop in ListenThreadMonitor.run).
NONFULL_STREAM_LIMIT = 10
# When True, the monitor automatically restarts failed stream connections.
RESTART_DEAD_STREAMS = True
# Seconds the monitor loop sleeps between health checks.
MONITOR_SLEEP_INTERVAL = 10
# Seconds to let freshly consolidated streams warm up before closing the
# streams they replace.
CONSOLIDATE_SLEEP_INTERVAL = 30
class ListenThreadMonitor(threading.Thread):
    '''The ListenThreadMonitor takes a follow list of any size, creates
    ListenThread objects and corresponding SiteStream objects for the follow
    list and initializes them. By default, the monitor will attempt to restart
    any connections that fail.

    Keyword arguments:

    * follow -- a list of users that have authenticated your app to follow
    * stream_with -- 'user' or 'followings'. A value of 'user' will cause the stream to only return data about actions the users specified in follow take. A value of 'followings' will cause the Stream object to return data about the user's followings (basically their home timeline). This defaults to 'user'.
    * consumer -- a python-oauth2 Consumer object for the app
    * token -- a python-oauth2 Token object for the app's owner account.
    * parser -- an object that extends BaseParser that will handle data returned by the stream.

    The monitor's run method blocks, so invoke it via start method if you want
    to run it in a separate thread.

    To use, first import ListenThreadMonitor and oauth2:

    >>> from sitebucket import ListenThreadMonitor
    >>> import oauth2

    Then generate your Consumer and Token objects:

    >>> token = oauth2.Token('key', 'secret')
    >>> consumer = oauth2.Consumer('key', 'secret')

    And then instantiate your ListenThreadMonitor object:

    >>> monitor = ListenThreadMonitor([1,2,3], consumer, token)

    Calling run will start the streaming connections and block until you kill
    the process:

    >>> monitor.run() #doctest: +SKIP

    Calling start will start the monitor loop in a separate thread:

    >>> monitor.start() #doctest: +SKIP

    It can be killed later via the disconnect method:

    >>> monitor.disconnect()
    '''

    def __init__(self, follow, consumer, token, stream_with="user",
                 parser=DefaultParser(), *args, **kwargs):
        '''Returns a ListenThreadMonitor object. Parameters are identical to
        the SiteStream object.'''
        # NOTE(review): DefaultParser() here is evaluated once at class
        # definition time, so every monitor built without an explicit parser
        # shares one instance. Kept for backward compatibility -- confirm
        # DefaultParser is stateless before relying on that.

        # Make sure follow is iterable.
        if not isinstance(follow, collections.Iterable):
            follow = [follow]

        follow.sort()
        self.follow = follow
        self.consumer = consumer
        self.token = token
        self.stream_with = stream_with
        self.parser = parser
        self.threads = self.__create_thread_objects(follow, stream_with)
        self.disconnect_issued = False
        self.running = False

        super(ListenThreadMonitor, self).__init__(*args, **kwargs)

    def __create_thread_objects(self, follow, stream_with):
        '''Split the specified follow list into groups of CONNECTION_LIMIT
        or smaller and then create ListenThread objects for those groups.

        >>> follow = range(1,1001)
        >>> monitor = ListenThreadMonitor(follow, consumer, token)
        >>> len(monitor.threads) >= len(follow)/FOLLOW_LIMIT
        True
        '''
        threads = []
        chunks = list(grouper(FOLLOW_LIMIT, follow))
        for chunk in chunks:
            stream = SiteStream(chunk, self.consumer, self.token,
                                stream_with, self.parser)
            thread = ListenThread(stream)
            # Daemonize so a stuck stream never blocks interpreter shutdown.
            thread.daemon = True
            threads.append(thread)
        logger.debug("Created %s new thread objects." % len(threads))
        return threads

    def run(self):
        '''Starts all threads and begins monitoring loop. Invoke this via
        the object's start method to run the monitor in a separate thread.

        >>> monitor = ListenThreadMonitor([1, 2, 3], consumer, token)
        >>> monitor.run() #doctest: +SKIP
        '''
        if not self.disconnect_issued:
            logger.info("Starting threads...")
            [thread.start() for thread in self.threads]

        while not self.disconnect_issued:
            self.running = True

            if RESTART_DEAD_STREAMS:
                self.restart_unhealthy_streams()

            # Too many under-capacity connections; merge them together.
            if len(self.nonfull_streams) > NONFULL_STREAM_LIMIT:
                self.consolidate_streams()

            if self.disconnect_issued:
                logger.info("Disconnect issued. Issuing shutdown requests to streams...")
                while self.threads:
                    thread = self.threads.pop()
                    thread.close()
                logger.info("Monitor terminating...")
            else:
                time.sleep(MONITOR_SLEEP_INTERVAL)

        self.running = False

    def add_follows(self, follow, start=True):
        '''Creates and adds new ListenThreads based on a specified follow
        list. Optionally starts the new threads.

        * follow -- list of users to start following
        * start -- (default: True) If true, start running the new threads.

        >>> follow = range(1,FOLLOW_LIMIT*10+1)
        >>> monitor = ListenThreadMonitor([], consumer, token)
        >>> monitor.add_follows(follow, start=False)
        >>> len(monitor.threads) >= len(follow)/FOLLOW_LIMIT
        True
        '''
        threads = self.__create_thread_objects(follow, self.stream_with)
        if start:
            [thread.start() for thread in threads]
        self.threads.extend(threads)

    def consolidate_streams(self):
        '''Find all streams that aren't following the maximum number of users
        they are permitted to follow and consolidate them into the smallest
        number of streaming connections possible.

        >>> monitor = ListenThreadMonitor([], consumer, token)
        >>> monitor.add_follows([1,], start=False)
        >>> monitor.add_follows([2,], start=False)
        >>> monitor.add_follows([3,], start=False)
        >>> len(monitor.threads)
        3
        >>> monitor.consolidate_streams()
        >>> len(monitor.threads)
        1
        '''
        logger.info("Attempting to minimize active stream connections.")
        nonfull_streams = self.nonfull_streams

        # Create a list of all the users in nonfull_streams
        follow = []
        [follow.extend(x.stream.follow) for x in nonfull_streams]

        consolidated_threads = \
            self.__create_thread_objects(follow, self.stream_with)

        # We only need to do work if our set of consolidated threads is
        # actually smaller than what we have currently
        if len(consolidated_threads) >= len(nonfull_streams):
            return

        if self.running:
            [x.start() for x in consolidated_threads]
            # Sleep after starting up the new threads to give them time to
            # catch up.
            # TODO: What we really need to do here is start the new threads
            # and wait for them to start receiving duplicate data
            # (The best way to do this is to wrap the stream's parsers with
            # some sort of object that keeps track of that. )
            time.sleep(CONSOLIDATE_SLEEP_INTERVAL)

        # Remove the old threads from the thread list and disconnect them.
        for x in nonfull_streams:
            self.threads.remove(x)
            x.close()

        # Add the new threads to the thread list
        logger.info("Reduced stream connections by %s" \
            % (len(nonfull_streams) - len(consolidated_threads)))
        self.threads.extend(consolidated_threads)

    def restart_unhealthy_streams(self):
        '''Restart all unhealthy streaming ListenThreads.'''
        unhealthy = self.unhealthy_streams
        if len(unhealthy) > 0:
            logger.info('%s unhealthy streams detected.' % len(unhealthy))
        [self.threads.remove(x) for x in unhealthy]
        [self.threads.append(x.restart()) for x in unhealthy]

    def disconnect(self):
        '''Sets the disconnect flag to True, which will cause the monitor's
        loop to terminate.

        >>> monitor = ListenThreadMonitor([], consumer, token)
        >>> monitor.disconnect()
        >>> monitor.run()
        '''
        logger.debug("Disconnect received.")
        self.disconnect_issued = True

    @property
    def nonfull_streams(self):
        ''' Returns a list of threads that aren't following a number of users
        equal to FOLLOW_LIMIT.

        >>> follow = range(1,FOLLOW_LIMIT+2)
        >>> monitor = ListenThreadMonitor(follow, consumer, token)
        >>> len(monitor.nonfull_streams) == 1
        True
        '''
        # BUG FIX: this previously compared against NONFULL_STREAM_LIMIT (10),
        # which mislabeled any stream following between NONFULL_STREAM_LIMIT
        # and FOLLOW_LIMIT - 1 users as "full". Per the docstring (and the
        # doctest above), a stream is only full at FOLLOW_LIMIT follows.
        return [x for x in self.threads \
                if len(x.stream.follow) < FOLLOW_LIMIT]

    @property
    def healthy_streams(self):
        ''' Returns a list of all threads that are healthy, uninitialized,
        or connecting.

        >>> monitor = ListenThreadMonitor([1], consumer, token)
        >>> len(monitor.healthy_streams) == 1
        True
        '''
        return [x for x in self.threads if x.connection_healthy]

    @property
    def unhealthy_streams(self):
        '''Returns a list of all threads with failed connections.

        >>> monitor = ListenThreadMonitor([], consumer, token)
        >>> monitor.unhealthy_streams
        []
        >>> failed_thread = ListenThread(failed_stream)
        >>> monitor.threads.append(failed_thread)
        >>> monitor.unhealthy_streams[0] == failed_thread
        True
        '''
        # Deliberately '== False' rather than 'not ...': uninitialized or
        # connecting threads (non-False, possibly None) must not count as
        # failed -- see healthy_streams above.
        return [x for x in self.threads if x.connection_healthy == False]
| |
from os import path
import re
from lib.pybars import Compiler
from lib.utils.DevTreeUtils import DevTree
from lib.utils.Logger import Log
from lib.utils.ProcessUtils import ExecutableRunner
# Convenience type-name constant for command attribute declarations.
# NOTE(review): currently unused in this module; the attribute lists below
# spell "uint32" out literally. Verify before removing.
UINT32 = "uint32"

# Handlebars template that expands every command through the "command"
# partial; explicit line breaks are emitted via the "br" partial (the
# generators later strip real newlines and turn "##br##" into '\n').
HEADER_TEMPLATE = "{{#each commands}}{{>command}}{{>br}}{{/each}}"

# Handlebars partial rendering one RHICommand as a C++ command struct derived
# from RenderHardwareCommand: a constructor taking all attributes, Execute()
# forwarding to the RHI method of the same name, Visualize(), and const
# members. Whitespace here is irrelevant -- the output is clang-formatted.
COMMAND_STRUCT_TEMPLATE = """
struct {{name}}Command : public RenderHardwareCommand {
public:
{{name}}Command(
{{#each attributes}}
{{#if ref_param}}const{{/if}}
{{{type}}}
{{#if ref_param}}&{{/if}}
{{name}}
{{#unless @last}}, {{/unless}}
{{/each}})
{{#unless attributes}} = default;{{/unless}}
{{#each attributes}}
{{#if @first}}:{{/if}}
{{name}}({{name}})
{{#if @last}}
{}
{{else}}
,
{{/if}}
{{/each}}
virtual ~{{name}}Command() = default;{{>br}}{{>br}}
virtual void Execute(RenderHardwareInterface& rhi) const {
rhi.{{name}}(
{{#each attributes}}
{{name}}
{{#unless @last}},{{/unless}}
{{/each}}
);
}{{>br}}{{>br}}
virtual void Visualize(std::ostream& ostream) const {
{{{visualizer}}}
}{{>br}}{{>br}}
{{#each attributes}}
const {{{type}}} {{name}};
{{/each}}
};{{>br}}
"""

# Handlebars partial rendering one RHICommand as a pure-virtual method
# declaration for the RenderHardwareInterface class.
COMMAND_METHOD_TEMPLATE = """
virtual void {{name}}(
{{#each attributes}}
{{#if ref_param}}const{{/if}}
{{{type}}}
{{#if ref_param}}&{{/if}}
{{name}}
{{#unless @last}},{{/unless}}
{{/each}}
) = 0;
"""
class RHICommand:
    """One render-hardware-interface command to be emitted by the generators.

    Attributes:
        name: The command name (becomes ``<name>Command`` / ``rhi.<name>``).
        attributes: List of RHICmdAttribute parameters of the command.
        visualizer: C++ statement(s) used in the generated Visualize() body;
            defaults to streaming the command name.
    """

    def __init__(self, command_name, attributes=None, visualizer=None):
        self.name = command_name
        # BUG FIX: the default used to be the mutable ``list()``, evaluated
        # once and shared by every RHICommand constructed without attributes;
        # mutating one instance's list would corrupt all of them.
        self.attributes = attributes if attributes is not None else []
        self.visualizer = visualizer if visualizer else 'ostream << "{0}";'.format(command_name)

    def to_dict(self):
        """Returns the template context dict for this command."""
        return {"name": self.name, "attributes": [attrib.to_dict() for attrib in self.attributes], 'visualizer': self.visualizer}
class RHICmdAttribute:
    """A single parameter of an RHI command.

    Attributes:
        name: Parameter name.
        type: C++ type of the parameter, as a string.
        ref_param: When True the generated parameter is a const reference;
            when False it is passed by value.
    """

    def __init__(self, name, type, ref_param=True):
        self.name, self.type, self.ref_param = name, type, ref_param

    def to_dict(self):
        """Returns the template context dict for this attribute."""
        return dict(name=self.name, type=self.type, ref_param=self.ref_param)
# Declarative registry of every RHI command. Each entry is expanded twice:
# into a command struct in RenderHardwareCommands.hpp and into a pure-virtual
# method on RenderHardwareInterface. Attributes default to const-reference
# parameters (ref_param=True); pass False for cheap by-value types.
rhi_commands = [
    RHICommand("SetViewport", [
        RHICmdAttribute("x", "int32", False),
        RHICmdAttribute("y", "int32", False),
        RHICmdAttribute("width", "int32", False),
        RHICmdAttribute("height", "int32", False)
    ], """
ostream << "SetViewport {";
ostream << "x: " << x;
ostream << " y: " << y;
ostream << " width: " << width;
ostream << " height: " << height;
ostream << "}";
"""),
    RHICommand("SetScissorRect", [
        RHICmdAttribute("x", "int32", False),
        RHICmdAttribute("y", "int32", False),
        RHICmdAttribute("width", "int32", False),
        RHICmdAttribute("height", "int32", False)
    ]),
    RHICommand("SetRenderTargets", [
        RHICmdAttribute("colorTargets", "std::vector<RenderTarget>"),
        RHICmdAttribute("depthStencilTarget", "RenderTarget")
    ]),
    RHICommand("EnableVertexBinding", [
        RHICmdAttribute("vertexBinding", "std::shared_ptr<VertexBinding>")
    ]),
    RHICommand("EnableShaderProgram", [
        RHICmdAttribute("shaderProgram", "std::shared_ptr<ShaderProgram>")
    ]),
    RHICommand("BindTexture2D", [
        RHICmdAttribute("texture", "std::shared_ptr<Texture2D>"),
        RHICmdAttribute("textureSlot", "uint32")
    ]),
    RHICommand("SetUniformParameter", [
        RHICmdAttribute("uniformParameter", "std::shared_ptr<UniformParameter::UniformIndex>"),
        RHICmdAttribute("value", "glm::mat4"),
    ]),
    RHICommand("SetIntUniformParameter", [
        RHICmdAttribute("uniformParameter", "std::shared_ptr<UniformParameter::UniformIndex>"),
        RHICmdAttribute("value", "uint32"),
    ]),
    RHICommand("DrawIndexedPrimitive", [
        RHICmdAttribute("indexBuffer", "std::shared_ptr<IndexBuffer>"),
        RHICmdAttribute("firstIndex", "uint32"),
        RHICmdAttribute("lastIndex", "uint32"),
    ]),
    RHICommand("ClearFramebuffer", [
        RHICmdAttribute("clearColor", "bool"),
        RHICmdAttribute("clearDepth", "bool"),
        RHICmdAttribute("clearStencil", "bool")
    ]),
    RHICommand("BeginFrame", [
        RHICmdAttribute("viewportClient", "std::shared_ptr<ViewportClient>")
    ]),
    RHICommand("EndFrame", [
        RHICmdAttribute("viewportClient", "std::shared_ptr<ViewportClient>"),
        RHICmdAttribute("presentSource", "RenderTarget")
    ]),
]
def generate_command_structs():
    """Regenerates the command-struct section of RenderHardwareCommands.hpp.

    Renders every entry of ``rhi_commands`` through COMMAND_STRUCT_TEMPLATE,
    splices the result between the ``// [GENERATE:COMMANDS]`` markers of the
    header, and clang-formats the file in place.
    """
    working_dir = DevTree.source_dir
    header_file = path.join(working_dir, "glove", "rendering", "RenderHardwareCommands.hpp")

    compiler = Compiler()
    header_template = compiler.compile(HEADER_TEMPLATE)
    command_struct_template = compiler.compile(COMMAND_STRUCT_TEMPLATE)
    br_template = compiler.compile("##br##")

    template_output = header_template({'commands': [cmd.to_dict() for cmd in rhi_commands]},
                                      partials={"command": command_struct_template, "br": br_template})

    # Collapse the template's own newlines; only explicit ##br## markers
    # become real line breaks (clang-format restores the layout afterwards).
    generated = str(template_output).replace('\n', '').replace('##br##', '\n')

    generate_regex = re.compile(r"(// \[GENERATE:COMMANDS\]\n)(.*)(// \[\\GENERATE:COMMANDS\])", re.M | re.S)

    with open(header_file, 'r') as header:
        # BUG FIX: the replacement used to be built as a plain string
        # ("\g<1>" + generated + "\g<3>"), which relied on the invalid string
        # escape '\g' and would misinterpret any backslash in the generated
        # code as a regex group reference. A callable replacement inserts the
        # generated text verbatim.
        new_header_content = generate_regex.sub(
            lambda match: match.group(1) + generated + match.group(3),
            header.read())

    with open(header_file, 'w+') as header:
        header.write(new_header_content)

    clang_format_runner = ExecutableRunner('clang-format', ['-i', header_file], working_dir,
                                           lambda: (Log.info("Formatted RHI commands header")),
                                           lambda: (Log.error("Failed to format RHI commands header")))
    clang_format_runner.run()
def generate_command_methods():
    """Regenerates the pure-virtual method section of RenderHardwareInterface.hpp.

    Renders every entry of ``rhi_commands`` through COMMAND_METHOD_TEMPLATE,
    splices the result between the ``// [GENERATE:COMMAND_METHODS]`` markers,
    and clang-formats the file in place.
    """
    working_dir = DevTree.source_dir
    header_file = path.join(working_dir, "glove", "rendering", "RenderHardwareInterface.hpp")

    compiler = Compiler()
    header_template = compiler.compile(HEADER_TEMPLATE)
    command_method_template = compiler.compile(COMMAND_METHOD_TEMPLATE)
    br_template = compiler.compile("##br##")

    template_output = header_template({'commands': [cmd.to_dict() for cmd in rhi_commands]},
                                      partials={"command": command_method_template, "br": br_template})

    # Collapse the template's own newlines; only explicit ##br## markers
    # become real line breaks (clang-format restores the layout afterwards).
    generated = str(template_output).replace('\n', '').replace('##br##', '\n')

    generate_regex = re.compile(r"(// \[GENERATE:COMMAND_METHODS\]\n)(.*)(// \[\\GENERATE:COMMAND_METHODS\])", re.M | re.S)

    with open(header_file, 'r') as header:
        # BUG FIX: same as generate_command_structs -- use a callable
        # replacement instead of a "\g<1>"-style replacement string so the
        # generated code cannot be mangled by regex escape handling.
        new_header_content = generate_regex.sub(
            lambda match: match.group(1) + generated + match.group(3),
            header.read())

    with open(header_file, 'w+') as header:
        header.write(new_header_content)

    clang_format_runner = ExecutableRunner('clang-format', ['-i', header_file], working_dir,
                                           lambda: (Log.info("Formatted RHI methods header")),
                                           lambda: (Log.error("Failed to format RHI commands header")))
    clang_format_runner.run()
def execute_command(args):
    """Entry point for the RHI-command generation task.

    ``args`` is accepted for dispatch compatibility but is not used.
    """
    Log.info("Generating RHI commands...")
    for generate in (generate_command_structs, generate_command_methods):
        generate()
    Log.info("Generated {0} RHI commands".format(len(rhi_commands)))
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates .h and .rc files for strings extracted from a .grd file.
This script generates an rc file and header (NAME.{rc,h}) to be included in
a build target. The rc file includes translations for strings pulled from the
given .grd file(s) and their corresponding localized .xtb files.
To specify strings that will be extracted, the script pointed to by the
argument "extract-datafile" should contain one or both of the following global
variables:
STRING_IDS is a list of strings IDs we want to import from the .grd files and
include in the generated RC file. These strings are universal for all brands.
MODE_SPECIFIC_STRINGS: is a dictionary of strings for which there are brand
specific values. This mapping provides brand- and mode-specific string ids for a
given input id as described here:
{
resource_id_1: { # A resource ID for use with GetLocalizedString.
brand_1: [ # 'google_chrome', for example.
string_id_1, # Strings listed in order of the brand's modes, as
string_id_2, # specified in install_static::InstallConstantIndex.
...
string_id_N,
],
brand_2: [ # 'chromium', for example.
...
],
},
resource_id_2: ...
}
Note: MODE_SPECIFIC_STRINGS cannot be specified if STRING_IDS is not specified.
"""
# The generated header file includes IDs for each string, but also has values to
# allow getting a string based on a language offset. For example, the header
# file looks like this:
#
# #define IDS_L10N_OFFSET_AR 0
# #define IDS_L10N_OFFSET_BG 1
# #define IDS_L10N_OFFSET_CA 2
# ...
# #define IDS_L10N_OFFSET_ZH_TW 41
#
# #define IDS_MY_STRING_AR 1600
# #define IDS_MY_STRING_BG 1601
# ...
# #define IDS_MY_STRING_BASE IDS_MY_STRING_AR
#
# This allows us to look up an ID for a string by adding IDS_MY_STRING_BASE
# and IDS_L10N_OFFSET_* for the language we are interested in.
#
from __future__ import print_function
import argparse
import glob
import io
import os
import sys
from xml import sax
BASEDIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.join(BASEDIR, '../../../tools/grit'))
sys.path.insert(2, os.path.join(BASEDIR, '../../../tools/python'))
from grit.extern import tclib
class GrdHandler(sax.handler.ContentHandler):
  """SAX handler that pulls selected message strings out of a .grd file.

  Attributes:
    messages: Dict mapping each extracted message name to its stripped text.
    referenced_xtb_files: Paths of every .xtb file listed inside the .grd's
      <translations> section.
  """

  def __init__(self, string_id_set):
    """Creates a handler.

    Args:
      string_id_set: Message names to extract; an empty set means extract
        every message in the file.
    """
    sax.handler.ContentHandler.__init__(self)
    self.messages = {}
    self.referenced_xtb_files = []
    self._wanted_ids = string_id_set
    self._active_message = None
    self._open_elements = []
    self._pieces = []
    self._text_sink = None

  def startElement(self, name, attrs):
    self._open_elements.append(name)
    if name == 'message':
      self._BeginMessage(attrs.getValue('name'))
    elif name == 'file' and self._open_elements[-2] == 'translations':
      self._RecordXtbFile(attrs.getValue('path'))

  def endElement(self, name):
    assert self._open_elements.pop() == name
    if name == 'message':
      self._EndMessage()

  def characters(self, content):
    # Text is only interesting while a wanted <message> is open.
    if self._text_sink:
      self._text_sink(self._open_elements[-1], content)

  def _BeginMessage(self, message_name):
    """Starts collecting text for |message_name| if it is wanted."""
    assert self._active_message is None
    if not self._wanted_ids or message_name in self._wanted_ids:
      self._active_message = message_name
    if self._active_message:
      self._text_sink = self._CollectMessageText

  def _CollectMessageText(self, containing_element, text):
    # Only text whose immediate parent is <message> or <ph> belongs to the
    # message; text in other nested elements is discarded.
    if text and containing_element in ('message', 'ph'):
      self._pieces.append(text)

  def _EndMessage(self):
    """Stores the accumulated message text and resets per-message state."""
    if self._active_message is not None:
      self.messages[self._active_message] = ''.join(self._pieces).strip()
    self._active_message = None
    self._pieces = []
    self._text_sink = None

  def _RecordXtbFile(self, xtb_file_path):
    """Remembers |xtb_file_path| when it has an .xtb extension."""
    if os.path.splitext(xtb_file_path)[1].lower() == '.xtb':
      self.referenced_xtb_files.append(xtb_file_path)
class XtbHandler(sax.handler.ContentHandler):
  """SAX handler that pulls selected translations out of an .xtb file.

  A single instance may be re-used to parse several .xtb files; per-document
  state is reset in startDocument.

  Attributes:
    translations: Mapping of string identifiers to translated text.
    lang: The language parsed from the .xtb file, normalized to e.g. 'FR_CA'.
  """

  def __init__(self, translation_ids):
    """Creates a handler.

    Args:
      translation_ids: mapping of translation ids to the list of string
        identifiers each translation should be recorded under.
    """
    sax.handler.ContentHandler.__init__(self)
    self.lang = None
    self.translations = None
    self._ids_by_translation = translation_ids
    self._open_elements = []
    self._target_string_ids = None
    self._pieces = []
    self._text_sink = None

  def startDocument(self):
    # Reset per-document output so the handler can parse many .xtb files.
    self.lang = ''
    self.translations = {}

  def startElement(self, name, attrs):
    self._open_elements.append(name)
    # translationbundle is the document element and carries the lang id.
    if len(self._open_elements) == 1:
      assert name == 'translationbundle'
      self.lang = attrs.getValue('lang').replace('-', '_').upper()
    if name == 'translation':
      self._BeginTranslation(attrs.getValue('id'))

  def endElement(self, name):
    assert self._open_elements.pop() == name
    if name == 'translation':
      self._EndTranslation()

  def characters(self, content):
    # Text only matters while a wanted <translation> is open.
    if self._text_sink:
      self._text_sink(self._open_elements[-1], content)

  def _BeginTranslation(self, translation_id):
    """Starts collecting text if |translation_id| is one we care about."""
    assert self._target_string_ids is None
    self._target_string_ids = self._ids_by_translation.get(translation_id)
    if self._target_string_ids:
      self._text_sink = self._CollectTranslationText

  def _CollectTranslationText(self, containing_element, text):
    # Only text directly inside <translation> belongs to the translation.
    if text and containing_element == 'translation':
      self._pieces.append(text)

  def _EndTranslation(self):
    """Records the translation under every mapped string id, then resets."""
    if self._target_string_ids:
      translated = ''.join(self._pieces).strip()
      for string_id in self._target_string_ids:
        self.translations[string_id] = translated
    self._target_string_ids = None
    self._pieces = []
    self._text_sink = None
class StringRcMaker(object):
  """Makes .h and .rc files containing strings and translations."""
  def __init__(self, inputs, expected_xtb_input_files, header_file, rc_file,
               brand, first_resource_id, string_ids_to_extract, mode_specific_strings):
    """Constructs a maker.

    Args:
      inputs: A list of (grd_file, xtb_dir) pairs containing the source data.
      expected_xtb_input_files: A list of xtb files that are expected to exist
        in the inputs folders. If there is a discrepancy between what exists
        and what is expected the script will fail.
      header_file: The location of the header file to write containing all the
        defined string IDs.
      rc_file: The location of the rc file to write containing all the string
        resources.
      brand: The brand to check against when extracting mode-specific strings.
      first_resource_id: The starting ID for the generated string resources.
      string_ids_to_extract: The IDs of strings we want to import from the .grd
        files and include in the generated RC file. These strings are universal
        for all brands.
      mode_specific_strings: A dictionary of strings that have conditional
        values based on the brand's install mode. Refer to the documentation at
        the top of this file for more information on the format of the
        dictionary.
    """
    self.inputs = inputs
    self.expected_xtb_input_files = expected_xtb_input_files
    # Sorted so it can be compared directly against the sorted list of xtb
    # files actually found on disk (see __ReadSourceAndTranslatedStrings).
    self.expected_xtb_input_files.sort()
    self.header_file = header_file
    self.rc_file = rc_file
    self.brand = brand
    self.first_resource_id = first_resource_id;
    self.string_id_set = set(string_ids_to_extract)
    self.mode_specific_strings = mode_specific_strings
    # Mode-specific string ids must be extracted too, so fold them in.
    self.__AddModeSpecificStringIds()
  def MakeFiles(self):
    # Full pipeline: read sources + translations, then emit the .rc and .h.
    translated_strings = self.__ReadSourceAndTranslatedStrings()
    self.__WriteRCFile(translated_strings)
    self.__WriteHeaderFile(translated_strings)
  class __TranslationData(object):
    """A container of information about a single translation."""
    def __init__(self, resource_id_str, language, translation):
      self.resource_id_str = resource_id_str
      self.language = language
      self.translation = translation
    def __lt__(self, other):
      """Allow __TranslationDatas to be sorted by id then by language."""
      return (self.resource_id_str, self.language) < (other.resource_id_str,
                                                      other.language)
  def __AddModeSpecificStringIds(self):
    """Adds the mode-specific strings for all of the current brand's install
    modes to self.string_id_set."""
    for string_id, brands in self.mode_specific_strings.items():
      brand_strings = brands.get(self.brand)
      if not brand_strings:
        raise RuntimeError(
            'No strings declared for brand \'%s\' in MODE_SPECIFIC_STRINGS for '
            'message %s' % (self.brand, string_id))
      self.string_id_set.update(brand_strings)
  def __ReadSourceAndTranslatedStrings(self):
    """Reads the source strings and translations from all inputs."""
    translated_strings = []
    all_xtb_files = []
    for grd_file, xtb_dir in self.inputs:
      # Get the name of the grd file sans extension.
      source_name = os.path.splitext(os.path.basename(grd_file))[0]
      # Compute a glob for the translation files.
      xtb_pattern = os.path.join(os.path.dirname(grd_file), xtb_dir,
                                 '%s*.xtb' % source_name)
      local_xtb_files = [x.replace('\\', '/') for x in glob.glob(xtb_pattern)]
      all_xtb_files.extend(local_xtb_files)
      translated_strings.extend(
          self.__ReadSourceAndTranslationsFrom(grd_file, local_xtb_files))
    translated_strings.sort()
    all_xtb_files.sort()
    # Fail loudly when the xtb files found on disk differ from the declared
    # list; otherwise translations could silently go missing from the build.
    if self.expected_xtb_input_files != all_xtb_files:
      extra = list(set(all_xtb_files) - set(self.expected_xtb_input_files))
      missing = list(set(self.expected_xtb_input_files) - set(all_xtb_files))
      error = '''Asserted file list does not match.
Expected input files:
{}
Actual input files:
{}
Missing input files:
{}
Extra input files:
{}
'''
      print(error.format('\n'.join(self.expected_xtb_input_files),
                         '\n'.join(all_xtb_files), '\n'.join(missing),
                         '\n'.join(extra)))
      sys.exit(1)
    return translated_strings
  def __ReadSourceAndTranslationsFrom(self, grd_file, xtb_files):
    """Reads source strings and translations for a .grd file.

    Reads the source strings and all available translations for the messages
    identified by self.string_id_set (or all the messages if self.string_id_set
    is empty). The source string is used where translations are missing.

    Args:
      grd_file: Path to a .grd file.
      xtb_files: List of paths to .xtb files.

    Returns:
      An unsorted list of __TranslationData instances.
    """
    sax_parser = sax.make_parser()
    # Read the source (en-US) string from the .grd file.
    grd_handler = GrdHandler(self.string_id_set)
    sax_parser.setContentHandler(grd_handler)
    sax_parser.parse(grd_file)
    source_strings = grd_handler.messages
    # Resolve the .xtb paths referenced by the .grd relative to its directory,
    # normalizing to forward slashes for comparison.
    grd_file_path = os.path.dirname(grd_file)
    source_xtb_files = []
    for xtb_file in grd_handler.referenced_xtb_files:
      relative_xtb_file_path = (
          os.path.join(grd_file_path, xtb_file).replace('\\', '/'))
      source_xtb_files.append(relative_xtb_file_path)
    missing_xtb_files = list(set(source_xtb_files) - set(xtb_files))
    # Manually put the source strings as en-US in the list of translated
    # strings.
    translated_strings = []
    for string_id, message_text in source_strings.items():
      translated_strings.append(self.__TranslationData(string_id,
                                                       'EN_US',
                                                       message_text))
    # Generate the message ID for each source string to correlate it with its
    # translations in the .xtb files. Multiple source strings may have the same
    # message text; hence the message id is mapped to a list of string ids
    # instead of a single value.
    translation_ids = {}
    for (string_id, message_text) in source_strings.items():
      message_id = tclib.GenerateMessageId(message_text)
      translation_ids.setdefault(message_id, []).append(string_id);
    # Track any xtb files that appear in the xtb folder but are not present in
    # the grd file.
    extra_xtb_files = []
    # Gather the translated strings from the .xtb files. Use the en-US string
    # for any message lacking a translation.
    xtb_handler = XtbHandler(translation_ids)
    sax_parser.setContentHandler(xtb_handler)
    for xtb_filename in xtb_files:
      if not xtb_filename in source_xtb_files:
        extra_xtb_files.append(xtb_filename)
      sax_parser.parse(xtb_filename)
      for string_id, message_text in source_strings.items():
        translated_string = xtb_handler.translations.get(string_id,
                                                         message_text)
        translated_strings.append(self.__TranslationData(string_id,
                                                         xtb_handler.lang,
                                                         translated_string))
    if missing_xtb_files or extra_xtb_files:
      if missing_xtb_files:
        missing_error = ("There were files that were found in the .grd file "
                         "'{}' but do not exist on disk:\n{}")
        print(missing_error.format(grd_file, '\n'.join(missing_xtb_files)))
      if extra_xtb_files:
        extra_error = ("There were files that exist on disk but were not found "
                       "in the .grd file '{}':\n{}")
        print(extra_error.format(grd_file, '\n'.join(extra_xtb_files)))
      sys.exit(1)
    return translated_strings
  def __WriteRCFile(self, translated_strings):
    """Writes a resource file with the strings provided in |translated_strings|.
    """
    HEADER_TEXT = (
        u'#include "%s"\n\n'
        u'STRINGTABLE\n'
        u'BEGIN\n'
        ) % os.path.basename(self.header_file)
    FOOTER_TEXT = (
        u'END\n'
    )
    # .rc files are written as UTF-16 with '\n' newlines.
    with io.open(self.rc_file,
                 mode='w',
                 encoding='utf-16',
                 newline='\n') as outfile:
      outfile.write(HEADER_TEXT)
      for translation in translated_strings:
        # Escape special characters for the rc file.
        escaped_text = (translation.translation.replace('"', '""')
                        .replace('\t', '\\t')
                        .replace('\n', '\\n'))
        outfile.write(u'  %s "%s"\n' %
                      (translation.resource_id_str + '_' + translation.language,
                       escaped_text))
      outfile.write(FOOTER_TEXT)
  def __WriteHeaderFile(self, translated_strings):
    """Writes a .h file with resource ids."""
    # TODO(grt): Stream the lines to the file rather than building this giant
    # list of lines first.
    lines = []
    do_languages_lines = ['\n#define DO_LANGUAGES']
    installer_string_mapping_lines = ['\n#define DO_STRING_MAPPING']
    do_mode_strings_lines = ['\n#define DO_MODE_STRINGS']
    # Write the values for how the languages ids are offset.
    seen_languages = set()
    offset_id = 0
    for translation_data in translated_strings:
      lang = translation_data.language
      if lang not in seen_languages:
        seen_languages.add(lang)
        lines.append('#define IDS_L10N_OFFSET_%s %s' % (lang, offset_id))
        do_languages_lines.append('  HANDLE_LANGUAGE(%s, IDS_L10N_OFFSET_%s)'
                                  % (lang.replace('_', '-').lower(), lang))
        offset_id += 1
      else:
        # translated_strings is sorted by id then language, so the first
        # resource's block enumerates every language exactly once; the first
        # repeated language means all languages have been seen.
        break
    # Write the resource ids themselves.
    resource_id = self.first_resource_id
    for translation_data in translated_strings:
      lines.append('#define %s %s' % (translation_data.resource_id_str + '_' +
                                      translation_data.language,
                                      resource_id))
      resource_id += 1
    # Handle mode-specific strings.
    for string_id, brands in self.mode_specific_strings.items():
      # Populate the DO_MODE_STRINGS macro.
      brand_strings = brands.get(self.brand)
      if not brand_strings:
        raise RuntimeError(
            'No strings declared for brand \'%s\' in MODE_SPECIFIC_STRINGS for '
            'message %s' % (self.brand, string_id))
      do_mode_strings_lines.append(
          '  HANDLE_MODE_STRING(%s_BASE, %s)'
          % (string_id, ', '.join([ ('%s_BASE' % s) for s in brand_strings])))
    # Generate defines for the specific strings to extract or take all of the
    # strings found in the translations.
    if self.string_id_set:
      string_ids_to_write = self.string_id_set;
    else:
      string_ids_to_write = {t.resource_id_str for t in translated_strings}
    # Write out base ID values.
    for string_id in sorted(string_ids_to_write):
      lines.append('#define %s_BASE %s_%s' % (string_id,
                                              string_id,
                                              translated_strings[0].language))
      installer_string_mapping_lines.append('  HANDLE_STRING(%s_BASE, %s)'
                                            % (string_id, string_id))
    with open(self.header_file, 'w') as outfile:
      outfile.write('\n'.join(lines))
      outfile.write('\n#ifndef RC_INVOKED')
      outfile.write(' \\\n'.join(do_languages_lines))
      outfile.write(' \\\n'.join(installer_string_mapping_lines))
      outfile.write(' \\\n'.join(do_mode_strings_lines))
      # .rc files must end in a new line
      outfile.write('\n#endif  // ndef RC_INVOKED\n')
def BuildArgumentParser():
  """Builds the argparse parser used by main().

  Returns:
    An argparse.ArgumentParser configured with every option this script
    accepts; required options are enforced by argparse itself.
  """
  parser = argparse.ArgumentParser(
      description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
  add = parser.add_argument

  # Optional brand identifier; only meaningful together with an extraction
  # datafile that declares MODE_SPECIFIC_STRINGS.
  add('-b',
      dest='brand',
      help='identifier of the browser brand (e.g., chromium).'
           'This argument is mandatory if the module file included'
           'by --extract-datafile contains MODE_SPECIFIC_STRINGS')

  # Repeatable inputs; -i and -r must appear the same number of times
  # (validated in main()).
  add('-i', action='append', required=True, dest='input_grd_files',
      help='path to .grd file')
  add('-r', action='append', required=True, dest='input_xtb_relative_paths',
      help='relative path to .xtb dir for each .grd file')
  add('-x', action='append', required=True, dest='expected_xtb_input_files',
      help='expected xtb input files to read')

  # Output locations.
  add('--header-file', required=True, dest='header_file',
      help='path to generated .h file to write')
  add('--rc-file', required=True, dest='rc_file',
      help='path to generated .rc file to write')

  add('--first-resource-id', type=int, required=True,
      dest='first_resource_id',
      help='first id for the generated string resources')

  add('--extract-datafile', dest='extract_datafile',
      help='the python file execute that will define the '
           'specific strings to extract from the source .grd file.'
           'The module should contain a global array STRING_IDS '
           'that specifies which string IDs need to be extracted '
           '(if no global member by that name exists, then all the '
           'strings are extracted). It may also optionally contain '
           'a dictionary MODE_SPECIFIC_STRINGS which defines the '
           'mode-specific strings to use for a given brand that is '
           'extracted.')

  return parser
def main():
  """Entry point: parse arguments, optionally load string-extraction data,
  and generate the .h/.rc files via StringRcMaker.

  Returns:
    0 on success. Argument inconsistencies are reported through
    parser.error(), which exits the process.
  """
  parser = BuildArgumentParser()
  args = parser.parse_args()
  # Extract all the strings from the given grd by default.
  string_ids_to_extract = []
  mode_specific_strings = {}
  # Check to see if an external module containing string extraction
  # information was specified.
  extract_datafile = args.extract_datafile
  if extract_datafile:
    datafile_locals = {}
    # Close the data file deterministically instead of leaking the handle
    # opened inline in the exec() call.
    with open(extract_datafile) as datafile:
      exec(datafile.read(), globals(), datafile_locals)
    if 'STRING_IDS' in datafile_locals:
      string_ids_to_extract = datafile_locals['STRING_IDS']
    if 'MODE_SPECIFIC_STRINGS' in datafile_locals:
      if not string_ids_to_extract:
        parser.error('MODE_SPECIFIC_STRINGS was specified in file ' +
            extract_datafile + ' but there were no specific STRING_IDS '
            'specified for extraction')
      mode_specific_strings = datafile_locals['MODE_SPECIFIC_STRINGS']
  brand = args.brand
  if brand:
    if not mode_specific_strings:
      parser.error('A brand was specified (' + brand + ') but no mode '
          'specific strings were given.')
    # The brand must appear in every mode-specific mapping; checking the
    # first entry is sufficient because all entries share the same brands.
    valid_brands = list(next(iter(mode_specific_strings.values())).keys())
    if brand not in valid_brands:
      parser.error('A brand was specified (' + brand + ') but it is not '
          'a valid brand [' + ', '.join(valid_brands) + '].')
  elif mode_specific_strings:
    parser.error('MODE_SPECIFIC_STRINGS were specified but no brand was '
        'given.')
  grd_files = args.input_grd_files
  xtb_relative_paths = args.input_xtb_relative_paths
  if len(grd_files) != len(xtb_relative_paths):
    parser.error('Mismatch in number of grd files ({}) and xtb relative '
        'paths ({})'.format(len(grd_files), len(xtb_relative_paths)))
  inputs = zip(grd_files, xtb_relative_paths)
  StringRcMaker(inputs, args.expected_xtb_input_files, args.header_file,
      args.rc_file, brand, args.first_resource_id, string_ids_to_extract,
      mode_specific_strings).MakeFiles()
  return 0
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
  sys.exit(main())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Chris Liechti <cliechti@gmx.net>
# All Rights Reserved.
# Simplified BSD License (see LICENSE.txt for full text)
"""\
This is a parser for configuration files that describe the memory map of a
micro controller.
The syntax is quite simple: it is parsed as white space delimited words.
# <-- this is a comment, it skips the rest of the line
It also supports templates that can be used to abbreviate definitions.
Template processing is also quite simple. Once the template text and the
variables are defined, it reads in the words, assigns it to the variables.
Every time values for all variables are available it parses the resulting text
using the same rules described here.
Supported commands are:
Words in brackets ("<...>") mean that the words following the command are
consumed as parameter to the command.
Example::
memory-map-begin
name LOGICAL
# declare a "DATA" segment at the beginning of RAM
segment .data in:RAM
segment .bss in:RAM
segment .noinit in:RAM
symbol _stack in:RAM,location:end
# declare multiple segments that are located in FLASH
programmable segment .text in:FLASH
programmable segment .const in:FLASH
programmable segment .data_init in:FLASH,mirror:.data
memory-map-end
memory-map-begin
name MSP430F2xx
based-on LOGICAL
read-only segment .bootloader 0x0c00-0x0fff
programmable segment .infomem 0x1000-0x10ff
programmable segment .infoD 0x1000-0x103f
programmable segment .infoC 0x1040-0x107f
programmable segment .infoB 0x1080-0x10bf
programmable segment .infoA 0x10c0-0x10ff
programmable segment .vectors 0xffe0-0xffff
memory-map-end
template-begin
memory-map-begin
based-on MSP430F2xx
segment RAM <RAM>
programmable segment FLASH <FLASH>
name <MCU>
memory-map-end
template-variables
<MCU> <RAM> <FLASH>
template-values
MSP430F2001 0x0200-0x027f 0xfc00-0xffdf # 128B RAM, 1kB Flash
MSP430F2002 0x0200-0x027f 0xfc00-0xffdf # 128B RAM, 1kB Flash
MSP430F2003 0x0200-0x027f 0xfc00-0xffdf # 128B RAM, 1kB Flash
MSP430F2011 0x0200-0x027f 0xf800-0xffdf # 128B RAM, 2kB Flash
MSP430F2012 0x0200-0x027f 0xf800-0xffdf # 128B RAM, 2kB Flash
MSP430F2013 0x0200-0x027f 0xf800-0xffdf # 128B RAM, 2kB Flash
template-end
"""
import rpn
import pkgutil
# NOTE: the class name keeps the historical misspelling ("Defintition");
# external code may reference it by this exact name.
class MCUDefintitionError(Exception):
    """Raised for errors in the MCU definition file."""
def filtered_words(words, mapping):
    """\
    Iterate over a sequence of words, substituting every word that appears
    as a key in *mapping* by its mapped value; all other words pass through
    unchanged.
    """
    return (mapping.get(token, token) for token in words)
class MCUDefintitions(rpn.RPN):
    """\
    RPN-based interpreter for MCU memory-map definition files.

    Each ``@rpn.word(...)`` method handles one command of the definition
    language. Parsed maps are accumulated in ``self.memory_maps`` keyed by
    the map's NAME; ``self.memory_map`` holds the map currently being built
    (``None`` outside a memory-map-begin/end pair).
    """
    def __init__(self):
        rpn.RPN.__init__(self)
        self.flags = []          # flags pending for the next SEGMENT
        self.memory_maps = {}    # finished maps: name -> definition dict
        self.memory_map = None   # map under construction, None when idle
        self.order = 0           # running counter to keep segment order
    def address_range(self, range_str):
        """\
        Split an address range (string) like '0x0200-0x0300' in a (from, to)
        tuple.
        """
        # int(x, 0) accepts 0x-prefixed hex as well as decimal.
        a, b = range_str.split('-')
        return int(a, 0), int(b, 0)
    @rpn.word('TEMPLATE-BEGIN')
    def word_TEMPLATE_BEGIN(self, stack):
        """\
        Read and execute a template. This command consists of 3 sections:
        - definition of a text
        - definition of a set of variables
        - values for the variables

        template-begin
            Begin a template. What follows is the text of the template
            itself. It may contain special words that will be used as
            variables. They can have any name. The template text is finished
            with the command 'template_variables'.
        template-variables
            The names of the variables follow. These are the words that are
            used in the previously defined template text. This section is
            terminated by 'template_values'.
        template-values
            Values are following until 'template_end' is found. Each word
            that is read is assigned to the list of values. When the list of
            values has the same length as the list of variables they are
            replaced in the template text and the resulting text is parsed
            again.
        template-end
            Denotes the end of a values section in a template.

        Example::

            template-begin
            memory-map-begin
                name <MCU>
                based-on MSP430F2xx
                segment RAM <RAM>
                programmable segment FLASH <FLASH>
            memory-map-end
            template-variables
                <MCU> <RAM> <FLASH>
            template-values
                MSP430F2001  0x0200-0x027f  0xfc00-0xffdf  # 128B RAM, 1kB Flash
                MSP430F2002  0x0200-0x027f  0xfc00-0xffdf  # 128B RAM, 1kB Flash
                MSP430F2003  0x0200-0x027f  0xfc00-0xffdf  # 128B RAM, 1kB Flash
                MSP430F2011  0x0200-0x027f  0xf800-0xffdf  # 128B RAM, 2kB Flash
                MSP430F2012  0x0200-0x027f  0xf800-0xffdf  # 128B RAM, 2kB Flash
                MSP430F2013  0x0200-0x027f  0xf800-0xffdf  # 128B RAM, 2kB Flash
            template-end
        """
        template = []
        # read the template itself
        while True:
            word = self.next_word()
            if word.lower() == 'template-variables':
                break
            template.append(word)
        #~ template = ' '.join(template)
        # read the variables
        template_variables = []
        while True:
            word = self.next_word()
            if word.lower() == 'template-values':
                break
            template_variables.append(word)
        # apply the template to the following values
        template_row = []
        while True:
            word = self.next_word()
            if word.lower() == 'template-end':
                if template_row:
                    raise MCUDefintitionError('number of values in template not a multiple of number of variables')
                break
            # collect values
            template_row.append(word)
            # enough values for template -> apply
            if len(template_row) == len(template_variables):
                # substitute variables into the template text and re-parse
                # it; any maps it defines are merged into memory_maps
                self.memory_maps.update(parse_words(filtered_words(template, dict(zip(template_variables, template_row)))))
                template_row = []
    @rpn.word('PROGRAMMABLE')
    def word_PROGRAMMABLE(self, stack):
        """\
        Set flag that the next defined segment is programmed on the target.

        Example::

            programmable segment .text in:FLASH
        """
        if self.memory_map is None:
            raise MCUDefintitionError('flags outside memory map definition not allowed')
        self.flags.append('programmable')
    @rpn.word('READ-ONLY')
    def word_READ_ONLY(self, stack):
        """\
        Set flag that the next defined segment is read-only (not programmed
        to target).

        Example::

            read-only segment bootloader 0x0c00-0x0fff
        """
        if self.memory_map is None:
            raise MCUDefintitionError('flags outside memory map definition not allowed')
        self.flags.append('read-only')
    @rpn.word('MEMORY-MAP-BEGIN')
    def word_MEMORY_MAP_BEGIN(self, stack):
        """
        Start the definition of a memory map for a MCU. It's expected that
        the NAME_ and SEGMENT_ commands are used to define a memory map.

        Example::

            memory-map-begin
                name LOGICAL
                # declare a "DATA" segment at the beginning of RAM
                segment .data in:RAM
                segment .bss in:RAM
                segment .noinit in:RAM
                symbol _stack in:RAM,location:end
                # declare multiple segments that are located in FLASH
                programmable segment .text in:FLASH
                programmable segment .const in:FLASH
                programmable segment .data_init in:FLASH,mirror:.data
            memory-map-end

            memory-map-begin
                name MSP430F2xx
                based-on LOGICAL
                read-only segment .bootloader 0x0c00-0x0fff
                programmable segment .infomem 0x1000-0x10ff
                programmable segment .infoD 0x1000-0x103f
                programmable segment .infoC 0x1040-0x107f
                programmable segment .infoB 0x1080-0x10bf
                programmable segment .infoA 0x10c0-0x10ff
                programmable segment .vectors 0xffe0-0xffff
            memory-map-end
        """
        if self.memory_map is not None:
            raise MCUDefintitionError('MEMORY-MAP-BEGIN without terminating the last map')
        self.memory_map = {}
    @rpn.word('MEMORY-MAP-END')
    def word_MEMORY_MAP_END(self, stack):
        """Terminate current memory map definition. See `MEMORY-MAP-BEGIN`_."""
        if '__name__' not in self.memory_map:
            raise MCUDefintitionError('each memory map requires a NAME')
        # file the finished map under its NAME and reset the builder state
        self.memory_maps[self.memory_map['__name__']] = self.memory_map
        self.memory_map = None
    @rpn.word('SEGMENT')
    def word_SEGMENT(self, stack):
        """\
        Example::

            segment <name> <memory_range>

        Defines a segment. Previously set flags are applied and cleared.

        ``<memory_range>`` can be an address range like ``0x0200-0x0300`` or
        a set of ``key:value`` pairs:

        ``in:<segment_name>``
            This segment is placed within an other parent segment. The
            memory range is inherited from the parent. Multiple segments can
            be placed in one parent segment.

        ``mirror:<segment_name>``
            The contents of this segment will be a copy of the given one. A
            typical use is to make a copy of the ``.data`` section that is
            in RAM and needs to be initialized (by the startup code) from a
            copy located in Flash memory::

                programmable segment .data_init in:FLASH,mirror:.data
        """
        if self.memory_map is None:
            raise MCUDefintitionError('SEGMENT outside memory map definition not allowed')
        segment_name = self.next_word()
        address = self.next_word()
        if ':' in address:
            # dictionary mode
            self.memory_map[segment_name] = {}
            for pair in address.split(','):
                key, value = pair.split(':')
                self.memory_map[segment_name][key] = value
        else:
            # address range
            start, end = self.address_range(address)
            self.memory_map[segment_name] = {'start':start, 'end':end}
        # common bookkeeping: definition order, pending flags, identity
        self.memory_map[segment_name]['order'] = self.order
        self.memory_map[segment_name]['flags'] = self.flags
        self.memory_map[segment_name]['__name__'] = segment_name
        self.memory_map[segment_name]['__type__'] = 'segment'
        self.flags = []
        self.order += 1
    @rpn.word('SYMBOL')
    def word_SYMBOL(self, stack):
        """\
        Example::

            symbol <name> <address>

        Defines a symbol with the value specified. ``<address>`` can also be
        a computed value. e.g. ``in:RAM,location:end``.

        Supported are: ``in:<segment_name>`` and ``location:[start|end]``.
        These values are computed at load time, i.e. the segment still have
        the address range specified in the definition (opposed to the values
        after the linker has "shrinked" the segments to the size of actually
        present data). Note that ``location:end`` is the segments last
        address plus one (end is exclusive in this case).
        """
        symbol_name = self.next_word()
        address = self.next_word()
        if ':' in address:
            # dictionary mode
            self.memory_map[symbol_name] = {}
            for pair in address.split(','):
                key, value = pair.split(':')
                self.memory_map[symbol_name][key] = value
        else:
            # address
            # NOTE: literal addresses are parsed base-16 here (unlike
            # address_range, which uses base auto-detection)
            self.memory_map[symbol_name] = {'address':int(address,16)}
        self.memory_map[symbol_name]['__name__'] = symbol_name
        self.memory_map[symbol_name]['__type__'] = 'symbol'
    @rpn.word('NAME')
    def word_NAME(self, stack):
        """\
        Set the name of a memory map.

        Example::

            name <name>
        """
        if self.memory_map is None:
            raise MCUDefintitionError('NAME outside memory map definition not allowed')
        self.memory_map['__name__'] = self.next_word()
    @rpn.word('BASED-ON')
    def word_BASED_ON(self, stack):
        """\
        Tell that a memory map definition builds on an other definition.
        All the definitions are merged when used.

        Example::

            based-on <name>
        """
        if self.memory_map is None:
            raise MCUDefintitionError('BASED-ON outside memory map definition not allowed')
        self.memory_map['__based_on__'] = self.next_word()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def parse_words(iterable):
    """\
    Parse a configuration file/text given as an iterable of words and
    return the dictionary of memory maps found (name -> definition).
    """
    interpreter = MCUDefintitions()
    interpreter.interpret(iterable)
    return interpreter.memory_maps
def expand_definition(memory_maps, name):
    """\
    Recursively expand the '__based_on__' keys to create a 'flat' definition
    for the given MCU name.

    The input dictionaries are not modified; a new dict is returned.
    NOTE: as in the original implementation, entries inherited from the base
    map overwrite same-named entries of the derived map — only '__name__'
    is restored to the requested name afterwards.
    """
    # work on a copy; avoid shadowing the builtin `map`
    definition = dict(memory_maps[name])
    base = definition.pop('__based_on__', None)
    if base is not None:
        definition.update(expand_definition(memory_maps, base))
        # name was overwritten by lowest base; drop it and restore below
        definition.pop('__name__', None)
    definition['__name__'] = name
    return definition
def load_internal():
    """\
    Load the memory-map configuration file shipped with the package
    (``definitions/msp430-mcu-list.txt``) and return a dictionary with all
    parsed memory maps (use ``expand_definition`` to flatten a single one).
    """
    data = pkgutil.get_data('msp430.asm', 'definitions/msp430-mcu-list.txt')
    return parse_words(rpn.words_in_string(data, name='msp430-mcu-list.txt'))
def load_from_file(filename):
    """\
    Load the given memory-map configuration file and return a dictionary
    with all parsed memory maps (use ``expand_definition`` to flatten a
    single one).
    """
    return parse_words(rpn.words_in_file(filename))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test only
# NOTE: Python 2 syntax below (print statements, "except Exc, e").
if __name__ == '__main__':
    from optparse import OptionParser
    from pprint import pprint
    parser = OptionParser()
    parser.add_option("-l", "--list",
            action = "store_true",
            dest = "list",
            default = False,
            help = "list available MCU names")
    parser.add_option("-d", "--dump",
            action = "store_true",
            dest = "dump",
            default = False,
            help = "dump all data instead of pretty printing")
    (options, args) = parser.parse_args()
    try:
        memory_maps = load_internal()
    except rpn.RPNError, e:
        # parse error in the bundled definition file: report location
        print "%s:%s: %s" % (e.filename, e.lineno, e)
    else:
        if options.list:
            for mcu in sorted(memory_maps):
                print mcu
        #~ pprint(memory_maps)
        # any positional argument is treated as an MCU name to expand
        for mcu in args:
            print '== memory map for %s ==' % mcu
            memmap = expand_definition(memory_maps, mcu)
            if options.dump:
                pprint(memmap)
            else:
                # print only real segments with a concrete address range
                for name, segment in sorted(memmap.items()):
                    if not name.startswith('__') and 'start' in segment:
                        print '%-12s %08x-%08x %s' % (
                            name,
                            segment['start'],
                            segment['end'],
                            ','.join(segment['flags']))
| |
"""Tests for aiohttp/worker.py"""
import asyncio
import pathlib
import ssl
from unittest import mock
import pytest
from aiohttp import helpers
from aiohttp.test_utils import make_mocked_coro
base_worker = pytest.importorskip('aiohttp.worker')
try:
import uvloop
except ImportError:
uvloop = None
WRONG_LOG_FORMAT = '%a "%{Referrer}i" %(h)s %(l)s %s'
ACCEPTABLE_LOG_FORMAT = '%a "%{Referrer}i" %s'
class BaseTestWorker:
    """Test scaffolding providing the attributes the worker base expects."""

    def __init__(self):
        self.exit_code = 0
        self.servers = {}
        cfg = mock.Mock()
        cfg.graceful_timeout = 100
        self.cfg = cfg
# Concrete worker under test: test scaffolding mixed into the real
# asyncio-based gunicorn worker class.
class AsyncioWorker(BaseTestWorker, base_worker.GunicornWebWorker):
    pass
# Fixture parameter list; the uvloop-backed variant is exercised only when
# uvloop is importable.
PARAMS = [AsyncioWorker]
if uvloop is not None:
    class UvloopWorker(BaseTestWorker, base_worker.GunicornUVLoopWebWorker):
        pass
    PARAMS.append(UvloopWorker)
@pytest.fixture(params=PARAMS)
def worker(request):
    """Instantiate each worker flavour with a mocked-out ``notify``."""
    instance = request.param()
    instance.notify = mock.Mock()
    return instance
def test_init_process(worker):
    """init_process must discard the pre-existing loop and install a new one."""
    with mock.patch('aiohttp.worker.asyncio') as asyncio_mock:
        try:
            worker.init_process()
        except TypeError:
            # The mocked asyncio module breaks later set-up; the loop
            # bookkeeping asserted below has already happened by then.
            pass
        assert asyncio_mock.get_event_loop.return_value.close.called
        assert asyncio_mock.new_event_loop.called
        assert asyncio_mock.set_event_loop.called
def test_run(worker, loop):
    # run() must start the app, drive _run() to completion, close the loop
    # and exit the process via SystemExit.
    worker.wsgi = mock.Mock()
    worker.loop = loop
    worker._run = mock.Mock(
        wraps=asyncio.coroutine(lambda: None))
    worker.wsgi.startup = make_mocked_coro(None)
    with pytest.raises(SystemExit):
        worker.run()
    assert worker._run.called
    worker.wsgi.startup.assert_called_once_with()
    assert loop.is_closed()
def test_handle_quit(worker):
    """Quit signal stops the worker with a clean (zero) exit code."""
    sig, frame = object(), object()
    worker.handle_quit(sig, frame)
    assert not worker.alive
    assert worker.exit_code == 0
def test_handle_abort(worker):
    """Abort signal stops the worker and reports a failure exit code."""
    sig, frame = object(), object()
    worker.handle_abort(sig, frame)
    assert not worker.alive
    assert worker.exit_code == 1
def test_init_signals(worker):
    """Signal handlers must be registered on the worker's event loop."""
    loop_mock = mock.Mock()
    worker.loop = loop_mock
    worker.init_signals()
    assert loop_mock.add_signal_handler.called
def test_make_handler(worker, mocker):
    """make_handler validates the log format and delegates to the app."""
    worker.wsgi = mock.Mock()
    worker.log = mock.Mock()
    worker.loop = mock.Mock()
    worker.cfg = mock.Mock()
    worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
    mocker.spy(worker, '_get_valid_log_format')
    handler = worker.make_handler(worker.wsgi)
    assert handler is worker.wsgi.make_handler.return_value
    assert worker._get_valid_log_format.called
# Valid formats pass through unchanged; the gunicorn default format is
# translated to the aiohttp default.
@pytest.mark.parametrize('source,result', [
    (ACCEPTABLE_LOG_FORMAT, ACCEPTABLE_LOG_FORMAT),
    (AsyncioWorker.DEFAULT_GUNICORN_LOG_FORMAT,
     AsyncioWorker.DEFAULT_AIOHTTP_LOG_FORMAT),
])
def test__get_valid_log_format_ok(worker, source, result):
    assert result == worker._get_valid_log_format(source)
def test__get_valid_log_format_exc(worker):
    """An unsupported gunicorn-style atom raises ValueError naming it."""
    with pytest.raises(ValueError) as excinfo:
        worker._get_valid_log_format(WRONG_LOG_FORMAT)
    assert '%(name)s' in str(excinfo)
@asyncio.coroutine
def test__run_ok(worker, loop):
    # Happy path: _run() serves the configured socket over SSL, notifies the
    # arbiter, and shuts down when the parent process changes (ppid check).
    worker.ppid = 1
    worker.alive = True
    worker.servers = {}
    sock = mock.Mock()
    sock.cfg_addr = ('localhost', 8080)
    worker.sockets = [sock]
    worker.wsgi = mock.Mock()
    worker.close = make_mocked_coro(None)
    worker.log = mock.Mock()
    worker.loop = loop
    loop.create_server = make_mocked_coro(sock)
    worker.wsgi.make_handler.return_value.requests_count = 1
    worker.cfg.max_requests = 100
    worker.cfg.is_ssl = True
    worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
    ssl_context = mock.Mock()
    with mock.patch('ssl.SSLContext', return_value=ssl_context):
        with mock.patch('aiohttp.worker.asyncio') as m_asyncio:
            # stub out the keep-alive sleep so the loop body runs instantly
            m_asyncio.sleep = mock.Mock(
                wraps=asyncio.coroutine(lambda *a, **kw: None))
            yield from worker._run()
    worker.notify.assert_called_with()
    worker.log.info.assert_called_with("Parent changed, shutting down: %s",
                                       worker)
    # the mocked SSLContext must be handed to create_server
    args, kwargs = loop.create_server.call_args
    assert 'ssl' in kwargs
    ctx = kwargs['ssl']
    assert ctx is ssl_context
@asyncio.coroutine
def test__run_exc(worker, loop):
    # A KeyboardInterrupt raised out of the keep-alive sleep must still
    # close the worker cleanly.
    with mock.patch('aiohttp.worker.os') as m_os:
        m_os.getpid.return_value = 1
        m_os.getppid.return_value = 1
        handler = mock.Mock()
        handler.requests_count = 0
        worker.servers = {mock.Mock(): handler}
        worker.ppid = 1
        worker.alive = True
        worker.sockets = []
        worker.log = mock.Mock()
        worker.loop = loop
        worker.cfg.is_ssl = False
        worker.cfg.max_redirects = 0
        worker.cfg.max_requests = 100
        with mock.patch('aiohttp.worker.asyncio.sleep') as m_sleep:
            # make the sleep future fail with KeyboardInterrupt
            slp = helpers.create_future(loop)
            slp.set_exception(KeyboardInterrupt)
            m_sleep.return_value = slp
            worker.close = make_mocked_coro(None)
            yield from worker._run()
            m_sleep.assert_called_with(1.0, loop=loop)
            worker.close.assert_called_with()
@asyncio.coroutine
def test_close(worker, loop):
    # close() must shut down the app, finish open connections (with the
    # graceful timeout minus slack), close servers, and be idempotent.
    srv = mock.Mock()
    srv.wait_closed = make_mocked_coro(None)
    handler = mock.Mock()
    worker.servers = {srv: handler}
    worker.log = mock.Mock()
    worker.loop = loop
    app = worker.wsgi = mock.Mock()
    app.cleanup = make_mocked_coro(None)
    handler.connections = [object()]
    handler.finish_connections.return_value = helpers.create_future(loop)
    handler.finish_connections.return_value.set_result(1)
    app.shutdown.return_value = helpers.create_future(loop)
    app.shutdown.return_value.set_result(None)
    yield from worker.close()
    app.shutdown.assert_called_with()
    app.cleanup.assert_called_with()
    handler.finish_connections.assert_called_with(timeout=95.0)
    srv.close.assert_called_with()
    assert worker.servers is None
    # second call is a no-op (servers already cleared)
    yield from worker.close()
@asyncio.coroutine
def test__run_ok_no_max_requests(worker, loop):
    # Same happy path as test__run_ok but with max_requests disabled (0):
    # the worker must still shut down on parent change, not on a request
    # count threshold.
    worker.ppid = 1
    worker.alive = True
    worker.servers = {}
    sock = mock.Mock()
    sock.cfg_addr = ('localhost', 8080)
    worker.sockets = [sock]
    worker.wsgi = mock.Mock()
    worker.close = make_mocked_coro(None)
    worker.log = mock.Mock()
    worker.loop = loop
    loop.create_server = make_mocked_coro(sock)
    worker.wsgi.make_handler.return_value.requests_count = 1
    worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
    worker.cfg.max_requests = 0
    worker.cfg.is_ssl = True
    ssl_context = mock.Mock()
    with mock.patch('ssl.SSLContext', return_value=ssl_context):
        with mock.patch('aiohttp.worker.asyncio') as m_asyncio:
            m_asyncio.sleep = mock.Mock(
                wraps=asyncio.coroutine(lambda *a, **kw: None))
            yield from worker._run()
    worker.notify.assert_called_with()
    worker.log.info.assert_called_with("Parent changed, shutting down: %s",
                                       worker)
    args, kwargs = loop.create_server.call_args
    assert 'ssl' in kwargs
    ctx = kwargs['ssl']
    assert ctx is ssl_context
@asyncio.coroutine
def test__run_ok_max_requests_exceeded(worker, loop):
    # When the handled request count (15) exceeds max_requests (10) the
    # worker must log the max-requests shutdown message instead of the
    # parent-changed one.
    worker.ppid = 1
    worker.alive = True
    worker.servers = {}
    sock = mock.Mock()
    sock.cfg_addr = ('localhost', 8080)
    worker.sockets = [sock]
    worker.wsgi = mock.Mock()
    worker.close = make_mocked_coro(None)
    worker.log = mock.Mock()
    worker.loop = loop
    loop.create_server = make_mocked_coro(sock)
    worker.wsgi.make_handler.return_value.requests_count = 15
    worker.cfg.access_log_format = ACCEPTABLE_LOG_FORMAT
    worker.cfg.max_requests = 10
    worker.cfg.is_ssl = True
    ssl_context = mock.Mock()
    with mock.patch('ssl.SSLContext', return_value=ssl_context):
        with mock.patch('aiohttp.worker.asyncio') as m_asyncio:
            m_asyncio.sleep = mock.Mock(
                wraps=asyncio.coroutine(lambda *a, **kw: None))
            yield from worker._run()
    worker.notify.assert_called_with()
    worker.log.info.assert_called_with("Max requests, shutting down: %s",
                                       worker)
    args, kwargs = loop.create_server.call_args
    assert 'ssl' in kwargs
    ctx = kwargs['ssl']
    assert ctx is ssl_context
def test__create_ssl_context_without_certs_and_ciphers(worker):
    """A plain cert/key pair is enough to build an SSLContext."""
    sample_dir = pathlib.Path(__file__).parent
    worker.cfg.ssl_version = ssl.PROTOCOL_SSLv23
    worker.cfg.cert_reqs = ssl.CERT_OPTIONAL
    worker.cfg.certfile = str(sample_dir / 'sample.crt')
    worker.cfg.keyfile = str(sample_dir / 'sample.key')
    worker.cfg.ca_certs = None
    worker.cfg.ciphers = None
    context = worker._create_ssl_context(worker.cfg)
    assert isinstance(context, ssl.SSLContext)
| |
"""
Interest rate transformations
===============================================================================
Overview
-------------------------------------------------------------------------------
Transformations between nominal, effective and periodic interest rates
can be realized using **cashflows**. This module implements the
following functions:
* ``effrate``: computes the effective interest rate given the nominal interest
rate or the periodic interest rate.
* ``nomrate``: computes the nominal interest rate given the effective interest
rate or the periodic interest rate.
* ``perrate``: computes the periodic interest rate given the effective interest
rate or the nominal interest rate.
In addition, it is possible to compute discount and compounding factors.
* ``to_discount_factor``: Returns a list of discount factors calculated as 1 / (1 + r)^(t - t0).
* ``to_compound_factor``: Returns a list of compounding factors calculated as (1 + r)^(t - t0).
Finally, it is also possible to compute a fixed equivalent rate given an
interest rate changing over time using ``equivalent_rate``.
Functions in this module
-------------------------------------------------------------------------------
"""
import numpy as np
import pandas as pd
from cashflows.timeseries import *
from cashflows.common import *
def effrate(nrate=None, prate=None, pyr=1):
    """
    Computes the effective interest rate given the nominal interest rate or the periodic interest rate.

    Args:
        nrate (float, list, numpy.ndarray, pandas.Series): Nominal interest rate.
        prate (float, list, numpy.ndarray, pandas.Series): Periodic interest rate.
        pyr (int, list): Number of compounding periods per year.

    Returns:
        Effective interest rate (float, pandas.Series).

    Raises:
        ValueError: When both or neither of ``nrate``/``prate`` are given,
            or when a vector rate and a vector ``pyr`` differ in length.

    **Examples**

    In this example, the equivalent effective interest rate for a periodic
    monthly interest rate of 1% is computed.

    >>> effrate(prate=1, pyr=12) # doctest: +ELLIPSIS
    12.68...

    This example shows how to compute the effective interest rate equivalent to
    a nominal interest rate of 10% with monthly compounding.

    >>> effrate(nrate=10, pyr=12) # doctest: +ELLIPSIS
    10.4713...

    Also it is possible to use list for some arguments of the functions as
    it is shown below.

    >>> effrate(prate=1, pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0     3.030100
    1     6.152015
    2    12.682503
    dtype: float64

    >>> effrate(nrate=10, pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0    10.337037
    1    10.426042
    2    10.471307
    dtype: float64

    >>> effrate(prate=[1, 2, 3], pyr=12) # doctest: +ELLIPSIS
    0    12.682503
    1    26.824179
    2    42.576089
    dtype: float64

    >>> effrate(nrate=[10, 12, 14], pyr=12) # doctest: +ELLIPSIS
    0    10.471307
    1    12.682503
    2    14.934203
    dtype: float64

    When a rate and the number of compounding periods (``pyr``) are vectors, they
    must have the same length. Computations are executed using the first rate
    with the first compounding and so on.

    >>> effrate(nrate=[10, 12, 14], pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0    10.337037
    1    12.616242
    2    14.934203
    dtype: float64

    >>> effrate(prate=[1, 2, 3], pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0     3.030100
    1    12.616242
    2    42.576089
    dtype: float64

    Also it is possible to transform interest rate time series.

    >>> nrate = interest_rate(const_value=12, start='2000-06', periods=12, freq='6M')
    >>> prate = perrate(nrate=nrate)
    >>> effrate(nrate = nrate) # doctest: +NORMALIZE_WHITESPACE
    2000-06    12.36
    2000-12    12.36
    2001-06    12.36
    2001-12    12.36
    2002-06    12.36
    2002-12    12.36
    2003-06    12.36
    2003-12    12.36
    2004-06    12.36
    2004-12    12.36
    2005-06    12.36
    2005-12    12.36
    Freq: 6M, dtype: float64

    >>> effrate(prate = prate) # doctest: +NORMALIZE_WHITESPACE
    2000-06    12.36
    2000-12    12.36
    2001-06    12.36
    2001-12    12.36
    2002-06    12.36
    2002-12    12.36
    2003-06    12.36
    2003-12    12.36
    2004-06    12.36
    2004-12    12.36
    2005-06    12.36
    2005-12    12.36
    Freq: 6M, dtype: float64

    """
    # Exactly one of the two rates must be supplied.
    numnone = 0
    if nrate is None:
        numnone += 1
    if prate is None:
        numnone += 1
    if numnone != 1:
        raise ValueError('One of the rates must be set to `None`')
    if isinstance(nrate, pd.Series):
        # time series: compounding frequency is derived from the index
        pyr = getpyr(nrate)
        erate = nrate.copy()
        for index in range(len(nrate)):
            erate[index] = 100 * (np.power(1 + nrate[index]/100/pyr, pyr) - 1)
        return erate
    if isinstance(prate, pd.Series):
        pyr = getpyr(prate)
        erate = prate.copy()
        for index in range(len(prate)):
            erate[index] = 100 * (np.power(1 + prate[index]/100, pyr) - 1)
        return erate
    if nrate is not None:
        # BUG FIX: the original tested isinstance(x, (list, type(np.array),
        # type(pd.Series))); `type(np.array)` is the type of the *function*
        # and `type(pd.Series)` is `type`, so only plain lists were ever
        # detected and numpy arrays raised a spurious length-mismatch error.
        maxlen = 1
        if isinstance(nrate, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(nrate))
        if isinstance(pyr, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(pyr))
        # broadcast scalars to the common length, then align as Series
        if isinstance(nrate, (int, float)):
            nrate = [nrate] * maxlen
        nrate = pd.Series(nrate, dtype=np.float64)
        if isinstance(pyr, (int, float)):
            pyr = [pyr] * maxlen
        pyr = pd.Series(pyr)
        if len(nrate) != len(pyr):
            raise ValueError('Lists must have the same length')
        # effective = (1 + nominal/pyr)^pyr - 1, in percent
        prate = nrate / pyr
        erate = 100 * (np.power(1 + prate/100, pyr) - 1)
        if maxlen == 1:
            erate = erate[0]
        return erate
    if prate is not None:
        # same vector alignment as above, with the periodic rate as input
        maxlen = 1
        if isinstance(prate, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(prate))
        if isinstance(pyr, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(pyr))
        if isinstance(prate, (int, float)):
            prate = [prate] * maxlen
        prate = pd.Series(prate, dtype=np.float64)
        if isinstance(pyr, (int, float)):
            pyr = [pyr] * maxlen
        pyr = pd.Series(pyr)
        if len(prate) != len(pyr):
            raise ValueError('Lists must have the same length')
        # effective = (1 + periodic)^pyr - 1, in percent
        erate = 100 * (np.power(1 + prate / 100, pyr) - 1)
        if maxlen == 1:
            erate = erate[0]
        return erate
def nomrate(erate=None, prate=None, pyr=1):
    """
    Computes the nominal interest rate given the effective interest rate or the periodic interest rate.

    Args:
        erate (float, list, numpy.ndarray, pandas.Series): Effective interest rate.
        prate (float, list, numpy.ndarray, pandas.Series): Periodic interest rate.
        pyr (int, list): Number of compounding periods per year.

    Returns:
        Nominal interest rate (float, pandas.Series).

    Raises:
        ValueError: When both or neither of ``erate``/``prate`` are given,
            or when a vector rate and a vector ``pyr`` differ in length.

    **Examples**

    >>> nomrate(prate=1, pyr=12) # doctest: +ELLIPSIS
    12.0

    >>> nomrate(erate=10, pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0    9.684035
    1    9.607121
    2    9.568969
    dtype: float64

    >>> nomrate(erate=10, pyr=12) # doctest: +ELLIPSIS
    9.5689...

    >>> nomrate(prate=1, pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0     3.0
    1     6.0
    2    12.0
    dtype: float64

    >>> nomrate(erate=[10, 12, 14], pyr=12) # doctest: +ELLIPSIS
    0     9.568969
    1    11.386552
    2    13.174622
    dtype: float64

    >>> nomrate(prate=[1, 2, 3], pyr=12) # doctest: +ELLIPSIS
    0    12.0
    1    24.0
    2    36.0
    dtype: float64

    When a rate and the number of compounding periods (`pyr`) are vectors, they
    must have the same length. Computations are executed using the first rate
    with the first compounding and so on.

    >>> nomrate(erate=[10, 12, 14], pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0     9.684035
    1    11.440574
    2    13.174622
    dtype: float64

    >>> nomrate(prate=[1, 2, 3], pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0     3.0
    1    12.0
    2    36.0
    dtype: float64

    >>> prate = interest_rate(const_value=6.00, start='2000-06', periods=12, freq='6M')
    >>> erate = effrate(prate=prate)
    >>> nomrate(erate=erate)
    2000-06    12.0
    2000-12    12.0
    2001-06    12.0
    2001-12    12.0
    2002-06    12.0
    2002-12    12.0
    2003-06    12.0
    2003-12    12.0
    2004-06    12.0
    2004-12    12.0
    2005-06    12.0
    2005-12    12.0
    Freq: 6M, dtype: float64

    >>> nomrate(prate=prate)
    2000-06    12.0
    2000-12    12.0
    2001-06    12.0
    2001-12    12.0
    2002-06    12.0
    2002-12    12.0
    2003-06    12.0
    2003-12    12.0
    2004-06    12.0
    2004-12    12.0
    2005-06    12.0
    2005-12    12.0
    Freq: 6M, dtype: float64

    """
    # Exactly one of the two rates must be supplied.
    numnone = 0
    if erate is None:
        numnone += 1
    if prate is None:
        numnone += 1
    if numnone != 1:
        raise ValueError('One of the rates must be set to `None`')
    if isinstance(erate, pd.Series):
        # time series: compounding frequency is derived from the index
        pyr = getpyr(erate)
        nrate = erate.copy()
        for index in range(len(erate)):
            nrate[index] = 100 * pyr * (np.power(1 + erate[index]/100, 1. / pyr) - 1)
        return nrate
    if isinstance(prate, pd.Series):
        pyr = getpyr(prate)
        nrate = prate.copy()
        for index in range(len(prate)):
            nrate[index] = prate[index] * pyr
        return nrate
    if erate is not None:
        # BUG FIX: the original tested isinstance(x, (list, type(np.array),
        # type(pd.Series))); `type(np.array)` is the type of the *function*
        # and `type(pd.Series)` is `type`, so only plain lists were ever
        # detected and numpy arrays raised a spurious length-mismatch error.
        maxlen = 1
        if isinstance(erate, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(erate))
        if isinstance(pyr, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(pyr))
        # broadcast scalars to the common length, then align as Series
        if isinstance(erate, (int, float)):
            erate = [erate] * maxlen
        erate = pd.Series(erate, dtype=np.float64)
        if isinstance(pyr, (int, float)):
            pyr = [pyr] * maxlen
        pyr = pd.Series(pyr)
        if len(erate) != len(pyr):
            raise ValueError('Lists must have the same length')
        # periodic = (1 + effective)^(1/pyr) - 1; nominal = pyr * periodic
        prate = 100 * (np.power(1 + erate / 100, 1 / pyr) - 1)
        nrate = pyr * prate
        if maxlen == 1:
            nrate = nrate[0]
        return nrate
    if prate is not None:
        # same vector alignment as above, with the periodic rate as input
        maxlen = 1
        if isinstance(prate, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(prate))
        if isinstance(pyr, (list, np.ndarray, pd.Series)):
            maxlen = max(maxlen, len(pyr))
        if isinstance(prate, (int, float)):
            prate = [prate] * maxlen
        prate = pd.Series(prate, dtype=np.float64)
        if isinstance(pyr, (int, float)):
            pyr = [pyr] * maxlen
        pyr = pd.Series(pyr)
        if len(prate) != len(pyr):
            raise ValueError('Lists must have the same length')
        # nominal = pyr * periodic
        nrate = pyr * prate
        if maxlen == 1:
            nrate = nrate[0]
        return nrate
def _rate_pyr_as_series(rate, pyr):
    # Broadcast a scalar/vector rate and a scalar/vector number of compounding
    # periods per year to two pandas Series of equal length.
    #
    # Fix: the original tested ``isinstance(x, (list, type(np.array), type(pd.Series)))``;
    # ``type(np.array)`` is the builtin-function type and ``type(pd.Series)`` is the
    # metaclass, so ndarray/Series arguments never matched and their length was
    # silently ignored.  Test against the real types instead.
    #
    # Returns (rate, pyr, maxlen) where maxlen is the broadcast length.
    maxlen = 1
    if isinstance(rate, (list, np.ndarray, pd.Series)):
        maxlen = max(maxlen, len(rate))
    if isinstance(pyr, (list, np.ndarray, pd.Series)):
        maxlen = max(maxlen, len(pyr))
    if isinstance(rate, (int, float)):
        rate = [rate] * maxlen
    rate = pd.Series(rate, dtype=np.float64)
    if isinstance(pyr, (int, float)):
        pyr = [pyr] * maxlen
    pyr = pd.Series(pyr)
    if len(rate) != len(pyr):
        raise ValueError('Lists must have the same length')
    return rate, pyr, maxlen

def perrate(nrate=None, erate=None, pyr=1):
    """
    Computes the periodic interest rate given the nominal interest rate or the effective interest rate.

    Args:
        nrate (float, pandas.Series): Nominal interest rate.
        erate (float, pandas.Series): Effective interest rate.
        pyr (int): Number of compounding periods per year.

    Returns:
        Periodic interest rate (float, pandas.Series).

    Raises:
        ValueError: if not exactly one of `nrate`/`erate` is given, or if
            vector arguments have mismatched lengths.

    **Examples**

    >>> perrate(nrate=10, pyr=12) # doctest: +ELLIPSIS
    0.8333...

    >>> perrate(erate=10, pyr=12) # doctest: +ELLIPSIS
    0.7974...

    >>> perrate(erate=10, pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0    3.228012
    1    1.601187
    2    0.797414
    dtype: float64

    >>> perrate(nrate=10, pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0    3.333333
    1    1.666667
    2    0.833333
    dtype: float64

    >>> perrate(erate=[10, 12, 14], pyr=12) # doctest: +ELLIPSIS
    0    0.797414
    1    0.948879
    2    1.097885
    dtype: float64

    >>> perrate(nrate=[10, 12, 14], pyr=12) # doctest: +ELLIPSIS
    0    0.833333
    1    1.000000
    2    1.166667
    dtype: float64

    When a rate and the number of compounding periods (``pyr``) are vectors, they
    must have the same length. Computations are executed using the first rate
    with the first compounding and so on.

    >>> perrate(erate=[10, 12, 14], pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0    3.228012
    1    1.906762
    2    1.097885
    dtype: float64

    >>> perrate(nrate=[10, 12, 14], pyr=[3, 6, 12]) # doctest: +ELLIPSIS
    0    3.333333
    1    2.000000
    2    1.166667
    dtype: float64

    >>> nrate = interest_rate(const_value=12.0, start='2000-06', periods=12, freq='6M')
    >>> erate = effrate(nrate=nrate)
    >>> perrate(erate=erate) # doctest: +NORMALIZE_WHITESPACE
    2000-06    6.0
    2000-12    6.0
    2001-06    6.0
    2001-12    6.0
    2002-06    6.0
    2002-12    6.0
    2003-06    6.0
    2003-12    6.0
    2004-06    6.0
    2004-12    6.0
    2005-06    6.0
    2005-12    6.0
    Freq: 6M, dtype: float64

    >>> perrate(nrate=nrate) # doctest: +NORMALIZE_WHITESPACE
    2000-06    6.0
    2000-12    6.0
    2001-06    6.0
    2001-12    6.0
    2002-06    6.0
    2002-12    6.0
    2003-06    6.0
    2003-12    6.0
    2004-06    6.0
    2004-12    6.0
    2005-06    6.0
    2005-12    6.0
    Freq: 6M, dtype: float64
    """
    # Exactly one of nrate/erate must be supplied.
    numnone = 0
    if nrate is None:
        numnone += 1
    if erate is None:
        numnone += 1
    if numnone != 1:
        raise ValueError('One of the rates must be set to `None`')
    if isinstance(nrate, pd.Series):
        # A time series carries its own compounding frequency; `pyr` is ignored.
        pyr = getpyr(nrate)
        prate = nrate.copy()
        for index in range(len(nrate)):
            prate[index] = nrate[index] / pyr
        return prate
    if isinstance(erate, pd.Series):
        pyr = getpyr(erate)
        prate = erate.copy()
        for index in range(len(erate)):
            prate[index] = 100 * (np.power(1 + erate[index]/100, 1. / pyr) - 1)
        return prate
    if nrate is not None:
        # periodic = nominal / periods-per-year
        nrate, pyr, maxlen = _rate_pyr_as_series(nrate, pyr)
        prate = nrate / pyr
        return prate[0] if maxlen == 1 else prate
    # periodic from effective: 100 * ((1 + e/100)^(1/pyr) - 1)
    erate, pyr, maxlen = _rate_pyr_as_series(erate, pyr)
    prate = 100 * (np.power(1 + erate / 100, 1 / pyr) - 1)
    return prate[0] if maxlen == 1 else prate
def to_discount_factor(nrate=None, erate=None, prate=None, base_date=None):
    """Returns a list of discount factors calculated as 1 / (1 + r)^(t - t0).

    Args:
        nrate (pandas.Series): Nominal interest rate per year.
        erate (pandas.Series): Effective interest rate per year.
        prate (pandas.Series): Periodic interest rate.
        base_date (string): basis time.

    Returns:
        `pandas.Series` of float values.

    Only one of the interest rates must be supplied for the computation.

    **Example**

    >>> nrate = interest_rate(const_value=4, periods=10, start='2016Q1', freq='Q')
    >>> erate = effrate(nrate=nrate)
    >>> prate = perrate(nrate=nrate)
    >>> to_discount_factor(nrate=nrate, base_date='2016Q3') # doctest: +ELLIPSIS
    [1.0201, 1.01, 1.0, 0.990..., 0.980..., 0.970..., 0.960..., 0.951..., 0.942..., 0.932...]
    """
    # Exactly one of the three rates may be given.
    supplied = sum(1 for rate in (nrate, erate, prate) if rate is not None)
    if supplied != 1:
        raise ValueError('Two of the rates must be set to `None`')
    # Normalize whichever rate was given into a periodic rate series.
    if nrate is not None:
        pyr = getpyr(nrate)
        prate = nrate.copy()
        for pos in range(len(nrate)):
            prate[pos] = nrate[pos] / pyr          # periodic rate
    if erate is not None:
        pyr = getpyr(erate)
        prate = erate.copy()
        for pos in range(len(erate)):
            prate[pos] = 100 * (np.power(1 + erate[pos]/100, 1. / pyr) - 1)  # periodic rate
    # Read (and validate) the frequency of the periodic series.
    pyr = getpyr(prate)
    # Cumulative discounting: each entry is the running product of 1/(1+r).
    factor = []
    running = 1
    for rate in prate:
        running = running / (1 + rate / 100)
        factor.append(running)
    # Re-base so the factor at `base_date` equals 1.0.
    if isinstance(base_date, str):
        base_date = pd.Period(base_date, freq=prate.axes[0].freq)
        base_date = period2pos(prate.axes[0], base_date)
    base = factor[base_date]
    return [value / base for value in factor]
def to_compound_factor(nrate=None, erate=None, prate=None, base_date=0):
    """Returns a list of compounding factors calculated as (1 + r)^(t - t0).

    Args:
        nrate (TimeSeries): Nominal interest rate per year.
        erate (TimeSeries): Effective interest rate per year.
        prate (TimeSeries): Periodic interest rate.
        base_date (int, tuple): basis time.

    Returns:
        Compound factor (list)

    Only one of the interest rates must be supplied; the compound factor is
    simply the reciprocal of the corresponding discount factor.

    **Example**

    >>> nrate = interest_rate(const_value=4, start='2000', periods=10, freq='Q')
    >>> to_compound_factor(nrate=nrate, base_date=2) # doctest: +ELLIPSIS
    [0.980..., 0.990..., 1.0, 1.01, 1.0201, 1.030..., 1.040..., 1.051..., 1.061..., 1.072...]
    """
    discount = to_discount_factor(nrate=nrate, erate=erate, prate=prate, base_date=base_date)
    return [1 / value for value in discount]
def equivalent_rate(nrate=None, erate=None, prate=None):
    """Returns the equivalent interest rate over a time period.

    Args:
        nrate (TimeSeries): Nominal interest rate per year.
        erate (TimeSeries): Effective interest rate per year.
        prate (TimeSeries): Periodic interest rate.

    Returns:
        float value.

    Raises:
        ValueError: if not exactly one of the three rates is supplied.

    Only one of the interest rate must be supplied for the computation.

    **Example**

    In this example, the equivalent rate for a periodic interest rate of 10% is
    computed.

    >>> equivalent_rate(prate=interest_rate([10]*5, start='2000Q1', freq='Q')) # doctest: +ELLIPSIS
    10.0...
    """
    numnone = 0
    if nrate is None:
        numnone += 1
    if erate is None:
        numnone += 1
    if prate is None:
        numnone += 1
    if numnone != 2:
        raise ValueError('Two of the rates must be set to `None`')
    # Note: the first element of each series is skipped — the convention in
    # this module is that rates apply from the first period transition.
    if nrate is not None:
        pyr = getpyr(nrate)
        factor = 1
        for element in nrate[1:]:
            factor *= (1 + element / 100 / pyr)
        return 100 * pyr * (factor**(1/(len(nrate) - 1)) - 1)
    if prate is not None:
        pyr = getpyr(prate)
        factor = 1
        for element in prate[1:]:
            factor *= (1 + element / 100)
        return 100 * (factor**(1/(len(prate) - 1)) - 1)
    if erate is not None:
        pyr = getpyr(erate)
        factor = 1
        for element in erate[1:]:
            # FIX: was `numpy.power` (module is imported as `np`) — NameError.
            factor *= (1 + (np.power(1 + element/100, 1. / pyr) - 1))
        # FIX: was `len(value)` — `value` is undefined; the geometric mean is
        # taken over the `erate` series, consistent with the other branches.
        return 100 * (factor**(1/(len(erate) - 1)) - 1)
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
# ---------------------------------------------------------------------------
"""Policies
Note that Dispatchers are now implemented in "dispatcher.py", but
are still documented here.
Policies
A policy is an object which manages the interaction between a public
Python object, and COM . In simple terms, the policy object is the
object which is actually called by COM, and it invokes the requested
method, fetches/sets the requested property, etc. See the
@win32com.server.policy.CreateInstance@ method for a description of
how a policy is specified or created.
Exactly how a policy determines which underlying object method/property
is obtained is up to the policy. A few policies are provided, but you
can build your own. See each policy class for a description of how it
implements its policy.
There is a policy that allows the object to specify exactly which
methods and properties will be exposed. There is also a policy that
will dynamically expose all Python methods and properties - even those
added after the object has been instantiated.
Dispatchers
A Dispatcher is a level in front of a Policy. A dispatcher is the
thing which actually receives the COM calls, and passes them to the
policy object (which in turn somehow does something with the wrapped
object).
It is important to note that a policy does not need to have a dispatcher.
A dispatcher has the same interface as a policy, and simply steps in its
place, delegating to the real policy. The primary use for a Dispatcher
is to support debugging when necessary, but without imposing overheads
when not (ie, by not using a dispatcher at all).
There are a few dispatchers provided - "tracing" dispatchers which simply
prints calls and args (including a variation which uses
win32api.OutputDebugString), and a "debugger" dispatcher, which can
invoke the debugger when necessary.
Error Handling
It is important to realise that the caller of these interfaces may
not be Python. Therefore, general Python exceptions and tracebacks aren't
much use.
In general, there is an Exception class that should be raised, to allow
the framework to extract rich COM type error information.
The general rule is that the **only** exception returned from Python COM
Server code should be an Exception instance. Any other Python exception
should be considered an implementation bug in the server (if not, it
should be handled, and an appropriate Exception instance raised). Any
other exception is considered "unexpected", and a dispatcher may take
special action (see Dispatchers above)
Occasionally, the implementation will raise the policy.error error.
This usually means there is a problem in the implementation that the
Python programmer should fix.
For example, if policy is asked to wrap an object which it can not
support (because, eg, it does not provide _public_methods_ or _dynamic_)
then policy.error will be raised, indicating it is a Python programmers
problem, rather than a COM error.
"""
__author__ = "Greg Stein and Mark Hammond"
import win32api
import winerror
import sys
import types
import pywintypes
import win32con, pythoncom
#Import a few important constants to speed lookups.
from pythoncom import \
DISPATCH_METHOD, DISPATCH_PROPERTYGET, DISPATCH_PROPERTYPUT, DISPATCH_PROPERTYPUTREF, \
DISPID_UNKNOWN, DISPID_VALUE, DISPID_PROPERTYPUT, DISPID_NEWENUM, \
DISPID_EVALUATE, DISPID_CONSTRUCTOR, DISPID_DESTRUCTOR, DISPID_COLLECT,DISPID_STARTENUM
S_OK = 0  # COM success HRESULT

# Few more globals to speed things.
# Cached PyIDispatch/PyIUnknown type objects for fast type comparisons.
IDispatchType = pythoncom.TypeIIDs[pythoncom.IID_IDispatch]
IUnknownType = pythoncom.TypeIIDs[pythoncom.IID_IUnknown]

from .exception import COMException

# NOTE(review): `error` is a plain string, yet this module raises it via
# `raise error(...)`, which would raise TypeError (a str is not callable).
# Looks like a leftover from Python 2 string exceptions — confirm intent.
error = __name__ + " error"

# Registry key templates (%s is the server CLSID) used to locate the Python
# class spec, the policy, the dispatcher, and extra sys.path entries.
regSpec = 'CLSID\\%s\\PythonCOM'
regPolicy = 'CLSID\\%s\\PythonCOMPolicy'
regDispatcher = 'CLSID\\%s\\PythonCOMDispatcher'
regAddnPath = 'CLSID\\%s\\PythonCOMPath'
def CreateInstance(clsid, reqIID):
    """Create a new instance of the specified IID

    The COM framework **always** calls this function to create a new
    instance for the specified CLSID.  This function looks up the
    registry for the name of a policy, creates the policy, and asks the
    policy to create the specified object by calling the _CreateInstance_ method.

    Exactly how the policy creates the instance is up to the policy.  See the
    specific policy documentation for more details.
    """
    # First see is sys.path should have something on it.
    try:
        for extra_path in win32api.RegQueryValue(win32con.HKEY_CLASSES_ROOT,
                                                 regAddnPath % clsid).split(';'):
            if extra_path not in sys.path:
                sys.path.insert(0, extra_path)
    except win32api.error:
        pass
    # Resolve the policy class; fall back to the default when unregistered.
    try:
        policy = resolve_func(
            win32api.RegQueryValue(win32con.HKEY_CLASSES_ROOT,
                                   regPolicy % clsid))
    except win32api.error:
        policy = DefaultPolicy
    # An optional dispatcher may sit in front of the policy (eg, for debugging).
    try:
        dispatcher = win32api.RegQueryValue(win32con.HKEY_CLASSES_ROOT,
                                            regDispatcher % clsid)
        if dispatcher:
            dispatcher = resolve_func(dispatcher)
    except win32api.error:
        dispatcher = None
    handler = dispatcher(policy, None) if dispatcher else policy(None)
    return handler._CreateInstance_(clsid, reqIID)
class BasicWrapPolicy:
    """The base class of policies.

    Normally not used directly (use a child class, instead)

    This policy assumes we are wrapping another object
    as the COM server.  This supports the delegation of the core COM entry points
    to either the wrapped object, or to a child class.

    This policy supports the following special attributes on the wrapped object
    _query_interface_ -- A handler which can respond to the COM 'QueryInterface' call.
    _com_interfaces_ -- An optional list of IIDs which the interface will assume are
        valid for the object.
    _invoke_ -- A handler which can respond to the COM 'Invoke' call.  If this attribute
        is not provided, then the default policy implementation is used.  If this attribute
        does exist, it is responsible for providing all required functionality - ie, the
        policy _invoke_ method is not invoked at all (and nor are you able to call it!)
    _getidsofnames_ -- A handler which can respond to the COM 'GetIDsOfNames' call.  If this attribute
        is not provided, then the default policy implementation is used.  If this attribute
        does exist, it is responsible for providing all required functionality - ie, the
        policy _getidsofnames_ method is not invoked at all (and nor are you able to call it!)

    IDispatchEx functionality:
    _invokeex_ -- Very similar to _invoke_, except slightly different arguments are used.
        And the result is just the _real_ result (rather than the (hresult, argErr, realResult)
        tuple that _invoke_ uses.
        This is the new, preferred handler (the default _invoke_ handler simply called _invokeex_)
    _getdispid_ -- Very similar to _getidsofnames_, except slightly different arguments are used,
        and only 1 property at a time can be fetched (which is all we support in getidsofnames anyway!)
        This is the new, preferred handler (the default _invoke_ handler simply called _invokeex_)
    _getnextdispid_- uses self._name_to_dispid_ to enumerate the DISPIDs
    """
    def __init__(self, object):
        """Initialise the policy object

        Params:

        object -- The object to wrap.  May be None *iff* @BasicWrapPolicy._CreateInstance_@ will be
        called immediately after this to setup a brand new object
        """
        if object is not None:
            self._wrap_(object)

    def _CreateInstance_(self, clsid, reqIID):
        """Creates a new instance of a **wrapped** object

        This method looks up a "@win32com.server.policy.regSpec@" % clsid entry
        in the registry (using @DefaultPolicy@)
        """
        try:
            classSpec = win32api.RegQueryValue(win32con.HKEY_CLASSES_ROOT,
                                               regSpec % clsid)
        except win32api.error:
            raise error("The object is not correctly registered - %s key can not be read" % (regSpec % clsid))
        myob = call_func(classSpec)
        self._wrap_(myob)
        try:
            return pythoncom.WrapObject(self, reqIID)
        except pythoncom.com_error as xxx_todo_changeme:
            (hr, desc, exc, arg) = xxx_todo_changeme.args
            from win32com.util import IIDToInterfaceName
            desc = "The object '%r' was created, but does not support the " \
                   "interface '%s'(%s): %s" \
                   % (myob, IIDToInterfaceName(reqIID), reqIID, desc)
            raise pythoncom.com_error(hr, desc, exc, arg)

    def _wrap_(self, object):
        """Wraps up the specified object.

        This function keeps a reference to the passed
        object, and may interrogate it to determine how to respond to COM requests, etc.
        """
        # We "clobber" certain of our own methods with ones
        # provided by the wrapped object, iff they exist.
        self._name_to_dispid_ = { }
        ob = self._obj_ = object
        if hasattr(ob, '_query_interface_'):
            self._query_interface_ = ob._query_interface_
        if hasattr(ob, '_invoke_'):
            self._invoke_ = ob._invoke_
        if hasattr(ob, '_invokeex_'):
            self._invokeex_ = ob._invokeex_
        if hasattr(ob, '_getidsofnames_'):
            self._getidsofnames_ = ob._getidsofnames_
        if hasattr(ob, '_getdispid_'):
            self._getdispid_ = ob._getdispid_
        # Allow for override of certain special attributes.
        if hasattr(ob, '_com_interfaces_'):
            self._com_interfaces_ = []
            # Allow interfaces to be specified by name.
            for i in ob._com_interfaces_:
                if type(i) != pywintypes.IIDType:
                    # Prolly a string!
                    if i[0] != "{":
                        i = pythoncom.InterfaceNames[i]
                    else:
                        i = pythoncom.MakeIID(i)
                self._com_interfaces_.append(i)
        else:
            self._com_interfaces_ = [ ]

    # "QueryInterface" handling.
    def _QueryInterface_(self, iid):
        """The main COM entry-point for QueryInterface.

        This checks the _com_interfaces_ attribute and if the interface is not specified
        there, it calls the derived helper _query_interface_
        """
        if iid in self._com_interfaces_:
            return 1
        return self._query_interface_(iid)

    def _query_interface_(self, iid):
        """Called if the object does not provide the requested interface in _com_interfaces_,
        and does not provide a _query_interface_ handler.

        Returns a result to the COM framework indicating the interface is not supported.
        """
        return 0

    # "Invoke" handling.
    def _Invoke_(self, dispid, lcid, wFlags, args):
        """The main COM entry-point for Invoke.

        This calls the _invoke_ helper.
        """
        #Translate a possible string dispid to real dispid.
        if type(dispid) == type(""):
            try:
                dispid = self._name_to_dispid_[dispid.lower()]
            except KeyError:
                raise COMException(scode = winerror.DISP_E_MEMBERNOTFOUND, desc="Member not found")
        return self._invoke_(dispid, lcid, wFlags, args)

    def _invoke_(self, dispid, lcid, wFlags, args):
        # Delegates to the _invokeex_ implementation.  This allows
        # a custom policy to define _invokeex_, and automatically get _invoke_ too.
        return S_OK, -1, self._invokeex_(dispid, lcid, wFlags, args, None, None)

    # "GetIDsOfNames" handling.
    def _GetIDsOfNames_(self, names, lcid):
        """The main COM entry-point for GetIDsOfNames.

        This checks the validity of the arguments, and calls the _getidsofnames_ helper.
        """
        if len(names) > 1:
            raise COMException(scode = winerror.DISP_E_INVALID, desc="Cannot support member argument names")
        return self._getidsofnames_(names, lcid)

    def _getidsofnames_(self, names, lcid):
        ### note: lcid is being ignored...
        return (self._getdispid_(names[0], 0), )

    # IDispatchEx support for policies.  Most of the IDispathEx functionality
    # by default will raise E_NOTIMPL.  Thus it is not necessary for derived
    # policies to explicitely implement all this functionality just to not implement it!
    def _GetDispID_(self, name, fdex):
        return self._getdispid_(name, fdex)

    def _getdispid_(self, name, fdex):
        try:
            ### TODO - look at the fdex flags!!!
            return self._name_to_dispid_[name.lower()]
        except KeyError:
            raise COMException(scode = winerror.DISP_E_UNKNOWNNAME)

    # "InvokeEx" handling.
    def _InvokeEx_(self, dispid, lcid, wFlags, args, kwargs, serviceProvider):
        """The main COM entry-point for InvokeEx.

        This calls the _invokeex_ helper.
        """
        #Translate a possible string dispid to real dispid.
        if type(dispid) == type(""):
            try:
                dispid = self._name_to_dispid_[dispid.lower()]
            except KeyError:
                raise COMException(scode = winerror.DISP_E_MEMBERNOTFOUND, desc="Member not found")
        return self._invokeex_(dispid, lcid, wFlags, args, kwargs, serviceProvider)

    def _invokeex_(self, dispid, lcid, wFlags, args, kwargs, serviceProvider):
        """A stub for _invokeex_ - should never be called.

        Simply raises an exception.
        """
        # Base classes should override this method (and not call the base)
        raise error("This class does not provide _invokeex_ semantics")

    def _DeleteMemberByName_(self, name, fdex):
        return self._deletememberbyname_(name, fdex)

    def _deletememberbyname_(self, name, fdex):
        raise COMException(scode = winerror.E_NOTIMPL)

    def _DeleteMemberByDispID_(self, id):
        # FIX: was `self._deletememberbydispid(id)` (missing trailing
        # underscore), which raised AttributeError instead of returning the
        # intended E_NOTIMPL COMException from the helper below.
        return self._deletememberbydispid_(id)

    def _deletememberbydispid_(self, id):
        raise COMException(scode = winerror.E_NOTIMPL)

    def _GetMemberProperties_(self, id, fdex):
        return self._getmemberproperties_(id, fdex)

    def _getmemberproperties_(self, id, fdex):
        raise COMException(scode = winerror.E_NOTIMPL)

    def _GetMemberName_(self, dispid):
        return self._getmembername_(dispid)

    def _getmembername_(self, dispid):
        raise COMException(scode = winerror.E_NOTIMPL)

    def _GetNextDispID_(self, fdex, dispid):
        return self._getnextdispid_(fdex, dispid)

    def _getnextdispid_(self, fdex, dispid):
        ids = list(self._name_to_dispid_.values())
        ids.sort()
        if DISPID_STARTENUM in ids: ids.remove(DISPID_STARTENUM)
        if dispid==DISPID_STARTENUM:
            return ids[0]
        else:
            try:
                return ids[ids.index(dispid)+1]
            except ValueError: # dispid not in list?
                raise COMException(scode = winerror.E_UNEXPECTED)
            except IndexError: # No more items
                raise COMException(scode = winerror.S_FALSE)

    def _GetNameSpaceParent_(self):
        # FIX: was `self._getnamespaceparent()` (missing trailing underscore),
        # which raised AttributeError instead of delegating to the overridable
        # helper below.
        return self._getnamespaceparent_()

    def _getnamespaceparent_(self):
        raise COMException(scode = winerror.E_NOTIMPL)
class MappedWrapPolicy(BasicWrapPolicy):
    """Wraps an object using maps to do its magic

    This policy wraps up a Python object, using a number of maps
    which translate from a Dispatch ID and flags, into an object to call/getattr, etc.

    It is the responsibility of derived classes to determine exactly how the
    maps are filled (ie, the derived classes determine the map filling policy.

    This policy supports the following special attributes on the wrapped object
    _dispid_to_func_/_dispid_to_get_/_dispid_to_put_ -- These are dictionaries
      (keyed by integer dispid, values are string attribute names) which the COM
      implementation uses when it is processing COM requests.  Note that the implementation
      uses this dictionary for its own purposes - not a copy - which means the contents of
      these dictionaries will change as the object is used.
    """
    def _wrap_(self, object):
        BasicWrapPolicy._wrap_(self, object)
        wrapped = self._obj_
        # Adopt the wrapped object's dispid maps when present (shared, not
        # copied), otherwise start each map off empty.
        for map_name in ('_dispid_to_func_', '_dispid_to_get_', '_dispid_to_put_'):
            setattr(self, map_name, getattr(wrapped, map_name, {}))

    def _getmembername_(self, dispid):
        # Search the three maps in order: methods, then getters, then setters.
        for table in (self._dispid_to_func_,
                      self._dispid_to_get_,
                      self._dispid_to_put_):
            if dispid in table:
                return table[dispid]
        raise COMException(scode = winerror.DISP_E_MEMBERNOTFOUND)
class DesignatedWrapPolicy(MappedWrapPolicy):
    """A policy which uses a mapping to link functions and dispid

    A MappedWrappedPolicy which allows the wrapped object to specify, via certain
    special named attributes, exactly which methods and properties are exposed.

    All a wrapped object need do is provide the special attributes, and the policy
    will handle everything else.

    Attributes:
     _public_methods_ -- Required, unless a typelib GUID is given -- A list
                  of strings, which must be the names of methods the object
                  provides.  These methods will be exposed and callable
                  from other COM hosts.
     _public_attrs_ A list of strings, which must be the names of attributes on the object.
                  These attributes will be exposed and readable and possibly writeable from other COM hosts.
     _readonly_attrs_ -- A list of strings, which must also appear in _public_attrs.  These
                  attributes will be readable, but not writable, by other COM hosts.
     _value_ -- A method that will be called if the COM host requests the "default" method
                  (ie, calls Invoke with dispid==DISPID_VALUE)
     _NewEnum -- A method that will be called if the COM host requests an enumerator on the
                  object (ie, calls Invoke with dispid==DISPID_NEWENUM.)
                  It is the responsibility of the method to ensure the returned
                  object conforms to the required Enum interface.

    _typelib_guid_ -- The GUID of the typelibrary with interface definitions we use.
    _typelib_version_ -- A tuple of (major, minor) with a default of (1, 0)
    _typelib_lcid_ -- The LCID of the typelib, default = LOCALE_USER_DEFAULT

     _Evaluate -- Dunno what this means, except the host has called Invoke with dispid==DISPID_EVALUATE!
                  See the COM documentation for details.
    """
    def _wrap_(self, ob):
        # If we have nominated universal interfaces to support, load them now
        tlb_guid = getattr(ob, '_typelib_guid_', None)
        if tlb_guid is not None:
            tlb_major, tlb_minor = getattr(ob, '_typelib_version_', (1,0))
            tlb_lcid = getattr(ob, '_typelib_lcid_', 0)
            from win32com import universal
            # XXX - what if the user wants to implement interfaces from multiple
            # typelibs?
            # Filter out all 'normal' IIDs (ie, IID objects and strings starting with {
            interfaces = [i for i in getattr(ob, '_com_interfaces_', [])
                          if type(i) != pywintypes.IIDType and not i.startswith("{")]
            universal_data = universal.RegisterInterfaces(tlb_guid, tlb_lcid,
                                                          tlb_major, tlb_minor, interfaces)
        else:
            universal_data = []
        MappedWrapPolicy._wrap_(self, ob)
        if not hasattr(ob, '_public_methods_') and not hasattr(ob, "_typelib_guid_"):
            raise error("Object does not support DesignatedWrapPolicy, as it does not have either _public_methods_ or _typelib_guid_ attributes.")
        # Copy existing _dispid_to_func_ entries to _name_to_dispid_
        for dispid, name in self._dispid_to_func_.items():
            self._name_to_dispid_[name.lower()]=dispid
        for dispid, name in self._dispid_to_get_.items():
            self._name_to_dispid_[name.lower()]=dispid
        for dispid, name in self._dispid_to_put_.items():
            self._name_to_dispid_[name.lower()]=dispid
        # Patch up the universal stuff.
        for dispid, invkind, name in universal_data:
            self._name_to_dispid_[name.lower()]=dispid
            if invkind == DISPATCH_METHOD:
                self._dispid_to_func_[dispid] = name
            elif invkind in (DISPATCH_PROPERTYPUT, DISPATCH_PROPERTYPUTREF):
                self._dispid_to_put_[dispid] = name
            elif invkind == DISPATCH_PROPERTYGET:
                self._dispid_to_get_[dispid] = name
            else:
                raise ValueError("unexpected invkind: %d (%s)" % (invkind,name))
        # look for reserved methods
        if hasattr(ob, '_value_'):
            self._dispid_to_get_[DISPID_VALUE] = '_value_'
            self._dispid_to_put_[DISPID_PROPERTYPUT] = '_value_'
        if hasattr(ob, '_NewEnum'):
            self._name_to_dispid_['_newenum'] = DISPID_NEWENUM
            self._dispid_to_func_[DISPID_NEWENUM] = '_NewEnum'
        if hasattr(ob, '_Evaluate'):
            self._name_to_dispid_['_evaluate'] = DISPID_EVALUATE
            self._dispid_to_func_[DISPID_EVALUATE] = '_Evaluate'
        # Dynamically allocated dispids start above 999, clear of the
        # well-known DISPID_* constants.
        next_dispid = self._allocnextdispid(999)
        # note: funcs have precedence over attrs (install attrs first)
        if hasattr(ob, '_public_attrs_'):
            if hasattr(ob, '_readonly_attrs_'):
                readonly = ob._readonly_attrs_
            else:
                readonly = [ ]
            for name in ob._public_attrs_:
                dispid = self._name_to_dispid_.get(name.lower())
                if dispid is None:
                    dispid = next_dispid
                    self._name_to_dispid_[name.lower()] = dispid
                    next_dispid = self._allocnextdispid(next_dispid)
                self._dispid_to_get_[dispid] = name
                if name not in readonly:
                    self._dispid_to_put_[dispid] = name
        for name in getattr(ob, "_public_methods_", []):
            dispid = self._name_to_dispid_.get(name.lower())
            if dispid is None:
                dispid = next_dispid
                self._name_to_dispid_[name.lower()] = dispid
                next_dispid = self._allocnextdispid(next_dispid)
            self._dispid_to_func_[dispid] = name
        self._typeinfos_ = None # load these on demand.

    def _build_typeinfos_(self):
        # Return the type-info objects for the registered typelib, if any.
        # Can only ever be one for now.
        tlb_guid = getattr(self._obj_, '_typelib_guid_', None)
        if tlb_guid is None:
            return []
        tlb_major, tlb_minor = getattr(self._obj_, '_typelib_version_', (1,0))
        tlb = pythoncom.LoadRegTypeLib(tlb_guid, tlb_major, tlb_minor)
        typecomp = tlb.GetTypeComp()
        # Not 100% sure what semantics we should use for the default interface.
        # Look for the first name in _com_interfaces_ that exists in the typelib.
        for iname in self._obj_._com_interfaces_:
            try:
                type_info, type_comp = typecomp.BindType(iname)
                if type_info is not None:
                    return [type_info]
            except pythoncom.com_error:
                pass
        return []

    def _GetTypeInfoCount_(self):
        # COM entry point; type infos are built lazily on first request.
        if self._typeinfos_ is None:
            self._typeinfos_ = self._build_typeinfos_()
        return len(self._typeinfos_)

    def _GetTypeInfo_(self, index, lcid):
        # COM entry point; returns (default-lcid-flag, typeinfo) for `index`.
        if self._typeinfos_ is None:
            self._typeinfos_ = self._build_typeinfos_()
        if index < 0 or index >= len(self._typeinfos_):
            raise COMException(scode=winerror.DISP_E_BADINDEX)
        return 0, self._typeinfos_[index]

    def _allocnextdispid(self, last_dispid):
        # Return the next dispid above `last_dispid` not already used by any
        # of the three dispid maps.
        while 1:
            last_dispid = last_dispid + 1
            if last_dispid not in self._dispid_to_func_ and \
               last_dispid not in self._dispid_to_get_ and \
               last_dispid not in self._dispid_to_put_:
                return last_dispid

    def _invokeex_(self, dispid, lcid, wFlags, args, kwArgs, serviceProvider):
        ### note: lcid is being ignored...
        if wFlags & DISPATCH_METHOD:
            try:
                funcname = self._dispid_to_func_[dispid]
            except KeyError:
                # No method with that dispid: fall through to the property-get
                # handling below if that flag is also set, else it's an error.
                if not wFlags & DISPATCH_PROPERTYGET:
                    raise COMException(scode=winerror.DISP_E_MEMBERNOTFOUND) # not found
            else:
                try:
                    func = getattr(self._obj_, funcname)
                except AttributeError:
                    # May have a dispid, but that doesnt mean we have the function!
                    raise COMException(scode=winerror.DISP_E_MEMBERNOTFOUND)
                # Should check callable here
                try:
                    return func(*args)
                except TypeError as v:
                    # Particularly nasty is "wrong number of args" type error
                    # This helps you see what 'func' and 'args' actually is
                    if str(v).find("arguments")>=0:
                        print("** TypeError %s calling function %r(%r)" % (v, func, args))
                    raise
        if wFlags & DISPATCH_PROPERTYGET:
            try:
                name = self._dispid_to_get_[dispid]
            except KeyError:
                raise COMException(scode=winerror.DISP_E_MEMBERNOTFOUND) # not found
            retob = getattr(self._obj_, name)
            if type(retob)==types.MethodType: # a method as a property - call it.
                retob = retob(*args)
            return retob
        if wFlags & (DISPATCH_PROPERTYPUT | DISPATCH_PROPERTYPUTREF): ### correct?
            try:
                name = self._dispid_to_put_[dispid]
            except KeyError:
                raise COMException(scode=winerror.DISP_E_MEMBERNOTFOUND) # read-only
            # If we have a method of that name (ie, a property get function), and
            # we have an equiv. property set function, use that instead.
            if type(getattr(self._obj_, name, None)) == types.MethodType and \
               type(getattr(self._obj_, "Set" + name, None)) == types.MethodType:
                fn = getattr(self._obj_, "Set" + name)
                fn( *args )
            else:
                # just set the attribute
                setattr(self._obj_, name, args[0])
            return
        raise COMException(scode=winerror.E_INVALIDARG, desc="invalid wFlags")
class EventHandlerPolicy(DesignatedWrapPolicy):
    """The default policy used by event handlers in the win32com.client package.

    In addition to the base policy, this provides argument conversion semantics for
    params
      * dispatch params are converted to dispatch objects.
      * Unicode objects are converted to strings (1.5.2 and earlier)

    NOTE: Later, we may allow the object to override this process??
    """
    def _transform_args_(self, args, kwArgs, dispid, lcid, wFlags, serviceProvider):
        # Convert raw COM interface pointers into friendlier Python objects
        # before they reach the handler.
        converted = []
        for value in args:
            value_type = type(value)
            if value_type == IDispatchType:
                import win32com.client
                value = win32com.client.Dispatch(value)
            elif value_type == IUnknownType:
                try:
                    import win32com.client
                    value = win32com.client.Dispatch(value.QueryInterface(pythoncom.IID_IDispatch))
                except pythoncom.error:
                    pass    # Keep it as IUnknown
            converted.append(value)
        return tuple(converted), kwArgs

    def _invokeex_(self, dispid, lcid, wFlags, args, kwArgs, serviceProvider):
        # Transform the args, then delegate to the standard designated policy.
        args, kwArgs = self._transform_args_(args, kwArgs, dispid, lcid, wFlags, serviceProvider)
        return DesignatedWrapPolicy._invokeex_(self, dispid, lcid, wFlags, args, kwArgs, serviceProvider)
class DynamicPolicy(BasicWrapPolicy):
    """A policy which dynamically (ie, at run-time) determines public interfaces.

    A dynamic policy is used to dynamically dispatch methods and properties to the
    wrapped object. The list of objects and properties does not need to be known in
    advance, and methods or properties added to the wrapped object after construction
    are also handled.

    The wrapped object must provide the following attributes:

    _dynamic_ -- A method that will be called whenever an invoke on the object
    is called. The method is called with the name of the underlying method/property
    (ie, the mapping of dispid to/from name has been resolved.) This name property
    may also be '_value_' to indicate the default, and '_NewEnum' to indicate a new
    enumerator is requested.
    """

    def _wrap_(self, object):
        BasicWrapPolicy._wrap_(self, object)
        if not hasattr(self._obj_, '_dynamic_'):
            raise error("Object does not support Dynamic COM Policy")
        # Dynamically allocated dispids start above this floor.
        self._min_dynamic_ = 1000
        self._next_dynamic_ = 1000
        self._dyn_dispid_to_name_ = {
            DISPID_VALUE: '_value_',
            DISPID_NEWENUM: '_NewEnum',
        }

    def _getdispid_(self, name, fdex):
        # TODO - Look at fdex flags.
        key = name.lower()
        dispid = self._name_to_dispid_.get(key)
        if dispid is None:
            # Unknown name: hand out the next dynamic dispid.
            self._next_dynamic_ = self._next_dynamic_ + 1
            dispid = self._next_dynamic_
            self._name_to_dispid_[key] = dispid
            self._dyn_dispid_to_name_[dispid] = name  # Keep case in this map...
        return dispid

    def _invoke_(self, dispid, lcid, wFlags, args):
        return S_OK, -1, self._invokeex_(dispid, lcid, wFlags, args, None, None)

    def _invokeex_(self, dispid, lcid, wFlags, args, kwargs, serviceProvider):
        ### note: lcid is being ignored...
        ### note: kwargs is being ignored...
        ### note: serviceProvider is being ignored...
        ### there might be assigned DISPID values to properties, too...
        name = self._dyn_dispid_to_name_.get(dispid)
        if name is None:
            raise COMException(scode=winerror.DISP_E_MEMBERNOTFOUND, desc="Member not found")
        return self._obj_._dynamic_(name, lcid, wFlags, args)
# The policy used when none is explicitly specified - the designated-wrap policy.
DefaultPolicy = DesignatedWrapPolicy
def resolve_func(spec):
    """Resolve a function by name.

    Given a function specified by 'module.function', return a callable object
    (ie, the function itself).  A bare name with no '.' is looked up in this
    module's globals.
    """
    try:
        mname, fname = spec.rsplit(".", 1)
    except ValueError:  # No "." in name - assume in this module
        return globals()[spec]
    # Dont attempt to optimize by looking in sys.modules, as another thread
    # may also be performing the import - this way we take advantage of the
    # built-in import lock.
    return getattr(_import_module(mname), fname)
def call_func(spec, *args):
    """Call a function specified by name.

    Resolve 'module.function' via resolve_func() and invoke it with *args,
    returning the result.
    """
    fn = resolve_func(spec)
    return fn(*args)
def _import_module(mname):
"""Import a module just like the 'import' statement.
Having this function is much nicer for importing arbitrary modules than
using the 'exec' keyword. It is more efficient and obvious to the reader.
"""
__import__(mname)
# Eeek - result of _import_ is "win32com" - not "win32com.a.b.c"
# Get the full module from sys.modules
return sys.modules[mname]
#######
#
# Temporary hacks until all old code moves.
#
# These have been moved to a new source file, but some code may
# still reference them here. These will end up being removed.
try:
from .dispatcher import DispatcherTrace, DispatcherWin32trace
except ImportError: # Quite likely a frozen executable that doesnt need dispatchers
pass
| |
import logging
import re
import secrets
from email.headerregistry import AddressHeader
from email.message import EmailMessage
from typing import Dict, List, Optional, Tuple
from django.conf import settings
from django.utils.timezone import now as timezone_now
from django.utils.timezone import timedelta
from zerver.lib.actions import (
check_send_message,
internal_send_huddle_message,
internal_send_private_message,
internal_send_stream_message,
)
from zerver.lib.email_mirror_helpers import (
ZulipEmailForwardError,
decode_email_address,
get_email_gateway_message_string_from_address,
)
from zerver.lib.email_notifications import convert_html_to_markdown
from zerver.lib.exceptions import JsonableError, RateLimited
from zerver.lib.message import normalize_body, truncate_topic
from zerver.lib.queue import queue_json_publish
from zerver.lib.rate_limiter import RateLimitedObject
from zerver.lib.send_email import FromAddress
from zerver.lib.upload import upload_message_file
from zerver.models import (
Message,
MissedMessageEmailAddress,
Realm,
Recipient,
Stream,
UserProfile,
get_client,
get_display_recipient,
get_stream_by_id_in_realm,
get_system_bot,
get_user,
)
from zproject.backends import is_user_active
# Module-level logger for the email-mirror subsystem.
logger = logging.getLogger(__name__)
def redact_email_address(error_message: str) -> str:
    """Scrub a gateway email address out of *error_message* before logging.

    Finds an address at the email gateway's domain, annotates what kind of
    address it is (missed-message, stream, or invalid), then replaces the
    local part with 'X' characters so the log line no longer leaks the token.
    Returns the message unchanged if no gateway address is found.
    """
    if not settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK:
        domain = settings.EMAIL_GATEWAY_PATTERN.rsplit("@")[-1]
    else:
        # EMAIL_GATEWAY_EXTRA_PATTERN_HACK is of the form '@example.com'
        domain = settings.EMAIL_GATEWAY_EXTRA_PATTERN_HACK[1:]
    # re.escape the domain: without it, dots in the domain act as regex
    # wildcards and the pattern can match (and scrub) unrelated addresses.
    address_match = re.search(r"\b(\S*?)@" + re.escape(domain), error_message)
    if address_match:
        email_address = address_match.group(0)
        # Annotate basic info about the address before scrubbing:
        if is_missed_message_address(email_address):
            redacted_message = error_message.replace(
                email_address, f"{email_address} <Missed message address>"
            )
        else:
            try:
                target_stream_id = decode_stream_email_address(email_address)[0].id
                annotated_address = f"{email_address} <Address to stream id: {target_stream_id}>"
                redacted_message = error_message.replace(email_address, annotated_address)
            except ZulipEmailForwardError:
                redacted_message = error_message.replace(
                    email_address, f"{email_address} <Invalid address>"
                )
        # Scrub the address from the message, to the form XXXXX@example.com:
        string_to_scrub = address_match.groups()[0]
        redacted_message = redacted_message.replace(string_to_scrub, "X" * len(string_to_scrub))
        return redacted_message
    return error_message
def report_to_zulip(error_message: str) -> None:
    """Post an email-mirror error to the error bot realm's "errors" stream.

    No-op when ERROR_BOT is unconfigured.
    """
    if settings.ERROR_BOT is None:
        return
    error_bot = get_system_bot(settings.ERROR_BOT)
    error_stream = Stream.objects.get(name="errors", realm=error_bot.realm)
    content = f"""~~~\n{error_message}\n~~~"""
    send_zulip(error_bot, error_stream, "email mirror error", content)
def log_and_report(email_message: EmailMessage, error_message: str, to: Optional[str]) -> None:
    """Log an email-mirror failure and report the (redacted) text to Zulip."""
    recipient = to or "No recipient found"
    sender = email_message.get("From")
    full_report = f"Sender: {sender}\nTo: {recipient}\n{error_message}"
    full_report = redact_email_address(full_report)
    logger.error(full_report)
    report_to_zulip(full_report)
# Temporary missed message addresses
def generate_missed_message_token() -> str:
    """Return a fresh missed-message token: "mm" + 32 random hex digits."""
    random_suffix = secrets.token_hex(16)  # 16 random bytes -> 32 hex chars
    return "mm" + random_suffix
def is_missed_message_address(address: str) -> bool:
    """Whether *address* carries a syntactically valid missed-message token."""
    try:
        token = get_email_gateway_message_string_from_address(address)
    except ZulipEmailForwardError:
        # Not a gateway address at all.
        return False
    else:
        return is_mm_32_format(token)
def is_mm_32_format(msg_string: Optional[str]) -> bool:
    """
    Missed message strings are formatted with a little "mm" prefix
    followed by a randomly generated 32-character string.
    """
    if msg_string is None:
        return False
    # "mm" prefix plus 32 characters == 34 total.
    return msg_string.startswith("mm") and len(msg_string) == 34
def get_missed_message_token_from_address(address: str) -> str:
    """Extract and validate the "mm..." token embedded in *address*."""
    token = get_email_gateway_message_string_from_address(address)
    if is_mm_32_format(token):
        return token
    raise ZulipEmailForwardError("Could not parse missed message address")
def get_usable_missed_message_address(address: str) -> MissedMessageEmailAddress:
    """Look up a missed-message address that is unexpired and has uses left.

    Raises ZulipEmailForwardError when the token is unknown, expired, or
    out of uses.
    """
    token = get_missed_message_token_from_address(address)
    expiry_cutoff = timezone_now() - timedelta(
        seconds=MissedMessageEmailAddress.EXPIRY_SECONDS
    )
    try:
        mm_address = MissedMessageEmailAddress.objects.select_related().get(
            email_token=token,
            timestamp__gt=expiry_cutoff,
        )
    except MissedMessageEmailAddress.DoesNotExist:
        raise ZulipEmailForwardError("Missed message address expired or doesn't exist.")
    if not mm_address.is_usable():
        # Technically, is_usable() also checks expiry, but that case is
        # already excluded by the timestamp filter above.
        raise ZulipEmailForwardError("Missed message address out of uses.")
    return mm_address
def create_missed_message_address(user_profile: UserProfile, message: Message) -> str:
    """Create (and persist) a reply-to address for a message notification email.

    If the email gateway isn't configured, return the no-reply address,
    since there's no useful way for the user to reply into Zulip.
    """
    if settings.EMAIL_GATEWAY_PATTERN == "":
        return FromAddress.NOREPLY
    mm_address = MissedMessageEmailAddress.objects.create(
        user_profile=user_profile,
        message=message,
        email_token=generate_missed_message_token(),
    )
    return str(mm_address)
def construct_zulip_body(
    message: EmailMessage,
    realm: Realm,
    show_sender: bool = False,
    include_quotes: bool = False,
    include_footer: bool = False,
    prefer_text: bool = True,
) -> str:
    """Build the Zulip message content for an incoming email.

    Extracts the body, strips nulls and (optionally) footers, appends
    attachment links, and optionally prefixes the sender.
    """
    body = extract_body(message, include_quotes, prefer_text)
    # Remove null characters, since Zulip will reject them in message content.
    body = body.replace("\x00", "")
    if not include_footer:
        body = filter_footer(body)
    if not body.endswith("\n"):
        body = body + "\n"
    body += extract_and_upload_attachments(message, realm)
    if not body.rstrip():
        body = "(No email body)"
    if show_sender:
        sender = str(message.get("From", ""))
        body = f"From: {sender}\n{body}"
    return body
## Sending the Zulip ##
class ZulipEmailForwardUserError(ZulipEmailForwardError):
    """Forwarding failure attributable to the sender's email itself.

    Callers handle this by logging a warning rather than reporting a
    gateway error (see process_message).
    """

    pass
def send_zulip(sender: UserProfile, stream: Stream, topic: str, content: str) -> None:
    """Deliver an email-gateway message to *stream* as *sender*.

    The topic is truncated and the body normalized before sending.
    """
    safe_topic = truncate_topic(topic)
    safe_content = normalize_body(content)
    internal_send_stream_message(
        sender, stream, safe_topic, safe_content, email_gateway=True
    )
def send_mm_reply_to_stream(
    user_profile: UserProfile, stream: Stream, topic: str, body: str
) -> None:
    """Post a missed-message email reply to a stream as *user_profile*.

    On failure, the error is relayed back to the user via a private
    message from the notification bot.
    """
    try:
        check_send_message(
            sender=user_profile,
            client=get_client("Internal"),
            message_type_name="stream",
            message_to=[stream.id],
            topic_name=topic,
            message_content=body,
        )
    except JsonableError as error:
        error_message = (
            f"Error sending message to stream {stream.name} "
            f"via message notification email reply:\n{error.msg}"
        )
        internal_send_private_message(
            get_system_bot(settings.NOTIFICATION_BOT), user_profile, error_message
        )
def get_message_part_by_type(message: EmailMessage, content_type: str) -> Optional[str]:
    """Return the decoded text of the first MIME part with *content_type*.

    Decodes using the part's declared charset.  Per RFC 6657
    (https://tools.ietf.org/html/rfc6657), parts with no declared charset
    are decoded as us-ascii.  Returns None if no matching part exists.
    """
    charsets = message.get_charsets()
    for idx, part in enumerate(message.walk()):
        if part.get_content_type() != content_type:
            continue
        payload = part.get_payload(decode=True)
        assert isinstance(payload, bytes)
        charset = charsets[idx] or "us-ascii"
        return payload.decode(charset, errors="ignore")
    return None
def extract_body(
    message: EmailMessage, include_quotes: bool = False, prefer_text: bool = True
) -> str:
    """Extract the email body, preferring plaintext or HTML per *prefer_text*.

    Raises ZulipEmailForwardUserError if no body part is present at all, or
    if every body section is empty.
    """
    plaintext_content = extract_plaintext_body(message, include_quotes)
    html_content = extract_html_body(message, include_quotes)

    if plaintext_content is None and html_content is None:
        logger.warning("Content types: %s", [part.get_content_type() for part in message.walk()])
        raise ZulipEmailForwardUserError("Unable to find plaintext or HTML message body")
    if not plaintext_content and not html_content:
        raise ZulipEmailForwardUserError("Email has no nonempty body sections; ignoring.")

    if prefer_text and plaintext_content:
        return plaintext_content
    if not prefer_text and html_content:
        return html_content
    # Fall back to whichever nonempty section exists.
    fallback = html_content if prefer_text else plaintext_content
    assert fallback  # Ensured by the validation above; needed for mypy.
    return fallback
# Lazily-set flag ensuring talon_core.init() runs at most once per process.
talon_initialized = False
def extract_plaintext_body(message: EmailMessage, include_quotes: bool = False) -> Optional[str]:
    """Return the text/plain body, optionally stripping quoted replies via talon."""
    import talon_core

    global talon_initialized
    if not talon_initialized:
        talon_core.init()
        talon_initialized = True

    plaintext_content = get_message_part_by_type(message, "text/plain")
    if plaintext_content is None:
        return None
    if include_quotes:
        return plaintext_content
    return talon_core.quotations.extract_from_plain(plaintext_content)
def extract_html_body(message: EmailMessage, include_quotes: bool = False) -> Optional[str]:
    """Return the text/html body as Markdown, optionally de-quoted via talon."""
    import talon_core

    global talon_initialized
    if not talon_initialized:  # nocoverage
        talon_core.init()
        talon_initialized = True

    html_content = get_message_part_by_type(message, "text/html")
    if html_content is None:
        return None
    if not include_quotes:
        html_content = talon_core.quotations.extract_from_html(html_content)
    return convert_html_to_markdown(html_content)
def filter_footer(text: str) -> str:
    """Strip a conventional "--" signature footer from *text*.

    Only acts when exactly one line consists of just "--"; otherwise the
    footer structure is ambiguous and the text is returned unmodified.
    """
    possible_footers = [line for line in text.split("\n") if line.strip() == "--"]
    if len(possible_footers) != 1:
        # Be conservative and don't try to scrub content if there
        # isn't a trivial footer structure.
        return text
    # Pass maxsplit by keyword: a positional maxsplit before flags is
    # deprecated in re.split (removed in newer Pythons) and easy to misread.
    return re.split(r"^\s*--\s*$", text, maxsplit=1, flags=re.MULTILINE)[0].strip()
def extract_and_upload_attachments(message: EmailMessage, realm: Realm) -> str:
    """Upload every attachment in *message* and return Markdown links to them.

    Non-bytes payloads are skipped with a warning.  Returns the links joined
    by newlines (an empty string when there are no attachments).
    """
    user_profile = get_system_bot(settings.EMAIL_GATEWAY_BOT)
    attachment_links = []
    for part in message.walk():
        content_type = part.get_content_type()
        filename = part.get_filename()
        if not filename:
            continue
        attachment = part.get_payload(decode=True)
        if isinstance(attachment, bytes):
            s3_url = upload_message_file(
                filename,
                len(attachment),
                content_type,
                attachment,
                user_profile,
                target_realm=realm,
            )
            # Link the upload under its original filename; the previous
            # literal "(unknown)" link text dropped the filename entirely.
            formatted_link = f"[{filename}]({s3_url})"
            attachment_links.append(formatted_link)
        else:
            logger.warning(
                "Payload is not bytes (invalid attachment %s in message from %s).",
                filename,
                message.get("From"),
            )
    return "\n".join(attachment_links)
def decode_stream_email_address(email: str) -> Tuple[Stream, Dict[str, bool]]:
    """Map a stream email address to its Stream plus any encoded options."""
    email_token, options = decode_email_address(email)
    try:
        stream = Stream.objects.get(email_token=email_token)
    except Stream.DoesNotExist:
        raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
    return stream, options
def find_emailgateway_recipient(message: EmailMessage) -> str:
    """Find the gateway address this email was delivered to.

    We can't rely on Delivered-To alone; X-Gm-Original-To, when present, is
    more accurate, so headers are scanned in descending priority order.
    Raises ZulipEmailForwardError when no header matches the gateway pattern.
    """
    recipient_headers = [
        "X-Gm-Original-To",
        "Delivered-To",
        "Envelope-To",
        "Resent-To",
        "Resent-CC",
        "To",
        "CC",
    ]

    # Turn EMAIL_GATEWAY_PATTERN ("%s@example.com") into a lenient regex.
    pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split("%s")]
    match_email_re = re.compile(".*?".join(pattern_parts))

    for header_name in recipient_headers:
        for header_value in message.get_all(header_name, []):
            if isinstance(header_value, AddressHeader):
                candidates = [addr.addr_spec for addr in header_value.addresses]
            else:
                candidates = [str(header_value)]
            for candidate in candidates:
                if match_email_re.match(candidate):
                    return candidate

    raise ZulipEmailForwardError("Missing recipient in mirror email")
def strip_from_subject(subject: str) -> str:
    """Strip RE:/FWD: prefixes (and bracketed variants) from an email subject.

    Regex from:
    https://stackoverflow.com/questions/9153629/regex-code-for-removing-fwd-re-etc-from-email-subject
    """
    reply_regex = r"([\[\(] *)?\b(RE|FWD?) *([-:;)\]][ :;\])-]*|$)|\]+ *$"
    cleaned = re.sub(reply_regex, "", subject, flags=re.IGNORECASE | re.MULTILINE)
    return cleaned.strip()
def is_forwarded(subject: str) -> bool:
    """Whether *subject* starts with some form of FWD.

    The regex is shared with strip_from_subject, restricted to the FWD
    alternatives.
    """
    fwd_regex = r"([\[\(] *)?\b(FWD?) *([-:;)\]][ :;\])-]*|$)|\]+ *$"
    return re.match(fwd_regex, subject, flags=re.IGNORECASE) is not None
def process_stream_message(to: str, message: EmailMessage) -> None:
    """Post an email sent to a stream address onto that stream."""
    subject_header = message.get("Subject", "")
    subject = strip_from_subject(subject_header) or "(no topic)"

    stream, options = decode_stream_email_address(to)
    # Don't remove quotations if message is forwarded, unless otherwise specified:
    options.setdefault("include_quotes", is_forwarded(subject_header))

    body = construct_zulip_body(message, stream.realm, **options)
    send_zulip(get_system_bot(settings.EMAIL_GATEWAY_BOT), stream, subject, body)
    logger.info(
        "Successfully processed email to %s (%s)",
        stream.name,
        stream.realm.string_id,
    )
def process_missed_message(to: str, message: EmailMessage) -> None:
    """Handle a reply to a message notification email.

    Resolves the missed-message address *to* (consuming one use), then
    delivers the email body back into Zulip as the original recipient
    type (stream, personal, or huddle) on behalf of the replying user.
    """
    mm_address = get_usable_missed_message_address(to)
    # NOTE: a use is consumed before the sender-active check below, so an
    # inactive sender's attempt still burns one use of the address.
    mm_address.increment_times_used()

    user_profile = mm_address.user_profile
    topic = mm_address.message.topic_name()

    if mm_address.message.recipient.type == Recipient.PERSONAL:
        # We need to reply to the sender so look up their personal recipient_id
        recipient = mm_address.message.sender.recipient
    else:
        recipient = mm_address.message.recipient

    if not is_user_active(user_profile):
        logger.warning("Sending user is not active. Ignoring this message notification email.")
        return

    body = construct_zulip_body(message, user_profile.realm)

    if recipient.type == Recipient.STREAM:
        stream = get_stream_by_id_in_realm(recipient.type_id, user_profile.realm)
        send_mm_reply_to_stream(user_profile, stream, topic, body)
        recipient_str = stream.name
    elif recipient.type == Recipient.PERSONAL:
        display_recipient = get_display_recipient(recipient)
        assert not isinstance(display_recipient, str)
        recipient_str = display_recipient[0]["email"]
        recipient_user = get_user(recipient_str, user_profile.realm)
        internal_send_private_message(user_profile, recipient_user, body)
    elif recipient.type == Recipient.HUDDLE:
        display_recipient = get_display_recipient(recipient)
        assert not isinstance(display_recipient, str)
        emails = [user_dict["email"] for user_dict in display_recipient]
        recipient_str = ", ".join(emails)
        internal_send_huddle_message(user_profile.realm, user_profile, emails, body)
    else:
        raise AssertionError("Invalid recipient type!")

    logger.info(
        "Successfully processed email from user %s to %s",
        user_profile.id,
        recipient_str,
    )
def process_message(message: EmailMessage, rcpt_to: Optional[str] = None) -> None:
    """Route an incoming gateway email to the right handler.

    User-caused failures are logged as warnings; other forwarding errors are
    logged and reported to the errors stream.
    """
    to: Optional[str] = None
    try:
        to = rcpt_to if rcpt_to is not None else find_emailgateway_recipient(message)
        if is_missed_message_address(to):
            process_missed_message(to, message)
        else:
            process_stream_message(to, message)
    except ZulipEmailForwardUserError as e:
        # TODO: notify sender of error, retry if appropriate.
        logger.warning(e.args[0])
    except ZulipEmailForwardError as e:
        log_and_report(message, e.args[0], to)
def validate_to_address(rcpt_to: str) -> None:
    """Raise ZulipEmailForwardError unless *rcpt_to* is a deliverable address."""
    if is_missed_message_address(rcpt_to):
        # Validates the token, its expiry, and remaining uses; the resolved
        # address itself is discarded.
        get_usable_missed_message_address(rcpt_to)
    else:
        decode_stream_email_address(rcpt_to)
def mirror_email_message(rcpt_to: str, msg_base64: str) -> Dict[str, str]:
    """Validate the recipient and enqueue the email for the mirror worker."""
    try:
        validate_to_address(rcpt_to)
    except ZulipEmailForwardError as e:
        return {
            "status": "error",
            "msg": f"5.1.1 Bad destination mailbox address: {e}",
        }
    event = {
        "rcpt_to": rcpt_to,
        "msg_base64": msg_base64,
    }
    queue_json_publish("email_mirror", event)
    return {"status": "success"}
# Email mirror rate limiter code:
class RateLimitedRealmMirror(RateLimitedObject):
    """Rate-limiting bucket for email-mirror traffic, keyed per realm."""

    def __init__(self, realm: Realm) -> None:
        self.realm = realm
        super().__init__()

    def key(self) -> str:
        # One bucket per (class, realm) pair.
        return f"{type(self).__name__}:{self.realm.string_id}"

    def rules(self) -> List[Tuple[int, int]]:
        return settings.RATE_LIMITING_MIRROR_REALM_RULES
def rate_limit_mirror_by_realm(recipient_realm: Realm) -> None:
    """Raise RateLimited if *recipient_realm* has exceeded its mirror quota."""
    limiter = RateLimitedRealmMirror(recipient_realm)
    ratelimited = limiter.rate_limit()[0]
    if ratelimited:
        raise RateLimited()
| |
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import itertools
import math
from nn_dataflow.core import Cost
from nn_dataflow.core import DataCategoryEnum as de
from nn_dataflow.core import LoopBlockingScheme
from nn_dataflow.core import LoopEnum as le
from nn_dataflow.core import MemHierEnum as me
from . import TestLoopBlockingFixture
class TestLoopBlockingScheme(TestLoopBlockingFixture):
''' Tests for LoopBlockingScheme. '''
def test_is_valid(self):
''' Whether is_valid. '''
# REGF size fails early.
lbs = self._lbs(self._make_bl_ts((1, 1, 0), (0, 1, 1), (1, 1, 0)))
self.assertFalse(lbs.is_valid())
self.assertFalse(hasattr(lbs, 'fetch'))
# GBUF size fails early.
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (1, 0, 1), (1, 0, 1)),
rsrckey='SM')
self.assertFalse(lbs.is_valid())
self.assertFalse(hasattr(lbs, 'fetch'))
# GBUF size fails at recheck.
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (1, 0, 1), (1, 0, 1)),
rsrckey='SM', optkey='BYP')
self.assertFalse(lbs.is_valid())
self.assertTrue(hasattr(lbs, 'fetch'))
# Valid.
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (0, 1, 1), (0, 1, 1)))
self.assertTrue(lbs.is_valid())
lbs = self._lbs(self._make_bl_ts(
(self.nld['BASE'].loopcnt[le.IFM], 1, 1),
(self.nld['BASE'].loopcnt[le.OFM], 1, 1),
(self.nld['BASE'].loopcnt[le.BAT], 1, 1)))
self.assertTrue(lbs.is_valid())
def test_data_size(self):
''' Get data_size. '''
for si, so, sb in itertools.product([1, 2, 4], [1, 2, 4], [1, 2, 4]):
# REGF size.
lbs = self._lbs(self._make_bl_ts((0, 1, si), (0, 1, so),
(0, 1, sb)), rsrckey='LG')
self.assertTrue(lbs.is_valid())
self.assertEqual(lbs.data_size(1, de.FIL),
si * so * self.nld['BASE'].usize_regf_of(de.FIL))
self.assertEqual(lbs.data_size(1, de.IFM),
si * sb * self.nld['BASE'].usize_regf_of(de.IFM))
self.assertEqual(lbs.data_size(1, de.OFM),
so * sb * self.nld['BASE'].usize_regf_of(de.OFM))
self.assertEqual(lbs.data_size(1),
si * so * self.nld['BASE'].usize_regf_of(de.FIL)
+ si * sb * self.nld['BASE'].usize_regf_of(de.IFM)
+ so * sb * self.nld['BASE'].usize_regf_of(de.OFM))
# GBUF size.
lbs = self._lbs(self._make_bl_ts((0, si, 1), (0, so, 1),
(0, sb, 1)), rsrckey='LG')
self.assertTrue(lbs.is_valid())
self.assertEqual(lbs.data_size(0, de.FIL),
si * so * self.nld['BASE'].usize_gbuf_of(de.FIL))
self.assertEqual(lbs.data_size(0, de.IFM),
si * sb * self.nld['BASE'].usize_gbuf_of(de.IFM))
self.assertEqual(lbs.data_size(0, de.OFM),
so * sb * self.nld['BASE'].usize_gbuf_of(de.OFM))
self.assertEqual(lbs.data_size(0),
si * so * self.nld['BASE'].usize_gbuf_of(de.FIL)
+ si * sb * self.nld['BASE'].usize_gbuf_of(de.IFM)
+ so * sb * self.nld['BASE'].usize_gbuf_of(de.OFM))
self.assertTrue(all(lbs.stored_in_gbuf))
def test_data_size_bypass(self):
''' Get data_size bypass. '''
for si, so, sb in itertools.product([1, 2, 4], [1, 2, 4], [1, 2, 4]):
# GBUF size.
lbs = self._lbs(self._make_bl_ts((0, si, 1), (0, so, 1),
(0, sb, 1)), optkey='BYP')
if lbs.is_valid():
if not lbs.stored_in_gbuf[de.FIL]:
self.assertEqual(lbs.data_size(0, de.FIL), 0)
if not lbs.stored_in_gbuf[de.IFM]:
self.assertEqual(lbs.data_size(0, de.IFM), 0)
if not lbs.stored_in_gbuf[de.OFM]:
self.assertEqual(lbs.data_size(0, de.OFM), 0)
self.assertEqual(lbs.data_size(0),
lbs.data_size(0, de.FIL)
+ lbs.data_size(0, de.IFM)
+ lbs.data_size(0, de.OFM))
def test_data_size_inv_args(self):
''' Get data_size invalid args. '''
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (0, 1, 1), (0, 1, 1)))
with self.assertRaises(IndexError):
_ = lbs.data_size(3)
with self.assertRaises(IndexError):
_ = lbs.data_size(0, 4)
def test_access(self):
''' get_access. '''
for bl_ts, bl_ords in self._gen_loopblocking_all():
lbs = self._lbs(bl_ts, bl_ords, rsrckey='LG')
self.assertTrue(lbs.is_valid())
self.assertSequenceEqual(bl_ts, lbs.bl_ts)
self.assertSequenceEqual(bl_ords, lbs.bl_ords)
# Model.
access = lbs.get_access()
# Sim.
dram_access, gbuf_access = self._sim_access_conv(lbs)
self.assertListEqual(access[me.DRAM], dram_access,
'test_access: DRAM: '
'model {} vs. sim {}. lbs: {} {}.'
.format(access[me.DRAM], dram_access,
bl_ts, bl_ords))
self.assertListEqual(access[me.GBUF], gbuf_access,
'test_access: GBUF: '
'model {} vs. sim {}. lbs: {} {}.'
.format(access[me.GBUF], gbuf_access,
bl_ts, bl_ords))
self.assertListEqual(access[me.REGF],
[lbs.ops, lbs.ops, lbs.ops * 2])
def test_access_bypass(self):
''' get_access bypass. '''
for bl_ts, bl_ords in self._gen_loopblocking_all():
lbs = self._lbs(bl_ts, bl_ords, rsrckey='LG', optkey='BYP')
self.assertTrue(lbs.is_valid())
if all(lbs.stored_in_gbuf):
continue
# Model.
access = lbs.get_access()
# Sim.
dram_access, gbuf_access = self._sim_access_conv(lbs)
self.assertListEqual(access[me.DRAM], dram_access,
'test_access_bypass: DRAM: '
'model {} vs. sim {}. lbs: {} {}. '
'stored in gbuf {}.'
.format(access[me.DRAM], dram_access,
bl_ts, bl_ords,
lbs.stored_in_gbuf))
self.assertListEqual(access[me.GBUF], gbuf_access,
'test_access_bypass: GBUF: '
'model {} vs. sim {}. lbs: {} {}. '
'stored in gbuf {}.'
.format(access[me.GBUF], gbuf_access,
bl_ts, bl_ords,
lbs.stored_in_gbuf))
def test_access_bypass_lgfil(self):
''' get_access bypass for ConvLayer with large filter size. '''
for bl_ts, bl_ords in self._gen_loopblocking_all(wlkey='LGFIL'):
lbs = self._lbs(bl_ts, bl_ords, wlkey='LGFIL', optkey='BYP')
if not lbs.is_valid():
continue
if all(lbs.stored_in_gbuf):
continue
# Model.
access = lbs.get_access()
# Sim.
dram_access, gbuf_access = self._sim_access_conv(lbs)
self.assertListEqual(access[me.DRAM], dram_access,
'test_access_bypass_lgfil: DRAM: '
'model {} vs. sim {}. lbs: {} {}. '
'stored in gbuf {}.'
.format(access[me.DRAM], dram_access,
bl_ts, bl_ords,
lbs.stored_in_gbuf))
self.assertListEqual(access[me.GBUF], gbuf_access,
'test_access_bypass_lgfil: GBUF: '
'model {} vs. sim {}. lbs: {} {}. '
'stored in gbuf {}.'
.format(access[me.GBUF], gbuf_access,
bl_ts, bl_ords,
lbs.stored_in_gbuf))
def test_access_pool(self):
''' get_access for PoolingLayer. '''
for bl_ts, bl_ords in self._gen_loopblocking_all(wlkey='POOL'):
lbs = self._lbs(bl_ts, bl_ords, wlkey='POOL', rsrckey='LG')
self.assertTrue(lbs.is_valid())
self.assertSequenceEqual(bl_ts, lbs.bl_ts)
self.assertSequenceEqual(bl_ords, lbs.bl_ords)
self.assertSequenceEqual(lbs.fetch[0], (1, 1, 1))
self.assertSequenceEqual(lbs.fetch[1], (1, 1, 1))
def test_access_invalid(self):
''' get_access invalid. '''
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (0, 1, 1), (1, 1, 0)),
rsrckey='SM')
self.assertFalse(lbs.is_valid())
self.assertTrue(math.isinf(sum([sum(a) for a in lbs.get_access()])))
def test_top_level_fetch(self):
''' get_top_level_fetch. '''
for bl_ts, bl_ords in self._gen_loopblocking_all():
lbs = self._lbs(bl_ts, bl_ords, rsrckey='LG')
self.assertTrue(lbs.is_valid())
# Top fetch.
top_fetch = lbs.get_top_level_fetch()
# Top access.
top_access = lbs.get_access()[me.DRAM]
self.assertEqual(top_access[de.FIL],
top_fetch[de.FIL]
* self.layer['BASE'].total_filter_size())
self.assertEqual(top_access[de.IFM],
top_fetch[de.IFM]
* self.layer['BASE']
.total_ifmap_size(self.batch_size))
self.assertEqual(top_access[de.OFM],
top_fetch[de.OFM]
* self.layer['BASE']
.total_ofmap_size(self.batch_size))
def test_top_level_fetch_bypass(self):
''' get_top_level_fetch bypass. '''
for bl_ts, bl_ords in self._gen_loopblocking_all():
lbs = self._lbs(bl_ts, bl_ords, rsrckey='LG', optkey='BYP')
self.assertTrue(lbs.is_valid())
if all(lbs.stored_in_gbuf):
continue
# Top fetch.
top_fetch = lbs.get_top_level_fetch()
# Top access.
top_access = lbs.get_access()[me.DRAM]
self.assertEqual(top_access[de.FIL],
top_fetch[de.FIL]
* self.layer['BASE'].total_filter_size())
self.assertEqual(top_access[de.IFM],
top_fetch[de.IFM]
* self.layer['BASE']
.total_ifmap_size(self.batch_size))
self.assertEqual(top_access[de.OFM],
top_fetch[de.OFM]
* self.layer['BASE']
.total_ofmap_size(self.batch_size))
def test_top_level_fetch_invalid(self):
''' get_top_level_fetch invalid. '''
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (0, 1, 1), (1, 1, 0)),
rsrckey='SM')
self.assertFalse(lbs.is_valid())
self.assertIsNone(lbs.get_top_level_fetch())
def test_access_cost(self):
''' get_access_cost. '''
for bl_ts, bl_ords in self._gen_loopblocking_all():
lbs = self._lbs(bl_ts, bl_ords)
if not lbs.is_valid():
continue
access = [sum(a) for a in lbs.get_access()]
cost = lbs.get_access_cost(self.cost)
self.assertAlmostEqual(
cost,
+ sum(a * c for a, c in zip(access, self.cost.mem_hier)))
def test_access_cost_same_lbs(self):
''' get_access_cost same lbs. '''
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (1, 0, 1), (1, 1, 0)),
rsrckey='LG')
self.assertTrue(lbs.is_valid())
c1 = lbs.get_access_cost(Cost(mac_op=1, mem_hier=(200, 6, 2, 1),
noc_hop=50, idl_unit=50))
c2 = lbs.get_access_cost(Cost(mac_op=-1, mem_hier=(-200, -6, -2, -1),
noc_hop=-50, idl_unit=-50))
self.assertAlmostEqual(c1, -c2)
def test_access_cost_invalid(self):
''' get_access_cost invalid. '''
lbs = self._lbs(self._make_bl_ts((0, 1, 1), (0, 1, 1), (1, 1, 0)),
rsrckey='SM')
self.assertFalse(lbs.is_valid())
self.assertTrue(math.isinf(lbs.get_access_cost(self.cost)))
def test_ordered_loops(self):
''' Get ordered_loops. '''
assert list(range(le.NUM)) == [le.IFM, le.OFM, le.BAT]
self.assertListEqual(
LoopBlockingScheme.ordered_loops((3, 5, 2), (2, 0, 1)),
[(le.IFM, 3), (le.BAT, 2), (le.OFM, 5)])
# Trivial loops at different positions.
self.assertListEqual(
LoopBlockingScheme.ordered_loops((3, 5, 1), (0, 1, 2)),
[(le.OFM, 5), (le.IFM, 3)])
self.assertListEqual(
LoopBlockingScheme.ordered_loops((3, 5, 1), (1, 2, 0)),
[(le.OFM, 5), (le.IFM, 3)])
self.assertListEqual(
LoopBlockingScheme.ordered_loops((3, 5, 1), (0, 2, 1)),
[(le.OFM, 5), (le.IFM, 3)])
# Different loops are trivial.
self.assertListEqual(
LoopBlockingScheme.ordered_loops((1, 5, 2), (0, 2, 1)),
[(le.OFM, 5), (le.BAT, 2)])
self.assertListEqual(
LoopBlockingScheme.ordered_loops((3, 1, 2), (0, 2, 1)),
[(le.BAT, 2), (le.IFM, 3)])
# Multiple trivial loops.
self.assertListEqual(
LoopBlockingScheme.ordered_loops((1, 5, 1), (0, 1, 2)),
[(le.OFM, 5)])
self.assertListEqual(
LoopBlockingScheme.ordered_loops((1, 1, 1), (0, 1, 2)),
[])
for bl_t, bl_ord in itertools.product(
itertools.product(*[range(1, 8)] * 3),
itertools.permutations(range(le.NUM))):
ord_loops = LoopBlockingScheme.ordered_loops(bl_t, bl_ord)
self.assertTrue(all(len(tpl) == 2 for tpl in ord_loops))
self.assertFalse(any(tpl[1] <= 1 for tpl in ord_loops))
self.assertEqual(len(ord_loops), le.NUM - bl_t.count(1))
self.assertTrue(all(tpl[1] == bl_t[tpl[0]] for tpl in ord_loops))
rev_loops = LoopBlockingScheme.ordered_loops(bl_t, bl_ord,
reverse=True)
ord_lpes = LoopBlockingScheme.ordered_loops(bl_t, bl_ord,
lpe_only=True)
self.assertEqual(len(rev_loops), len(ord_loops))
self.assertEqual(len(ord_lpes), len(ord_loops))
self.assertListEqual(list(reversed(rev_loops)), ord_loops)
self.assertListEqual([tpl[0] for tpl in ord_loops], ord_lpes)
def test_data_region_fetch(self):
''' PROC type data regions. '''
# Multiple fetches with normal DATA regions.
bl_ts = self._make_bl_ts((0, 1, 1), (0, 1, 1), (0, 1, 1))
bl_ords = [[0] * le.NUM for _ in range(2)]
bl_ords[0][le.IFM] = 1
bl_ords[0][le.OFM] = 2
bl_ords[0][le.BAT] = 0
bl_ords[1] = range(le.NUM)
lbs_norm = self._lbs(bl_ts, bl_ords)
self.assertTrue(lbs_norm.is_valid())
self.assertGreater(lbs_norm.fetch[0][de.IFM], 1)
self.assertGreater(lbs_norm.fetch[0][de.OFM], 1)
lbs = self._lbs(bl_ts, bl_ords, rsrckey='SRCNOTDATA')
self.assertFalse(lbs.is_valid())
lbs = self._lbs(bl_ts, bl_ords, rsrckey='DSTNOTDATA')
self.assertFalse(lbs.is_valid())
# Single top-level fetch.
bl_ts = self._make_bl_ts((1, 0, 1), (1, 0, 1), (1, 0, 1))
lbs_norm = self._lbs(bl_ts, rsrckey='LG')
lbs = self._lbs(bl_ts, rsrckey='SRCNOTDATA')
self.assertTrue(lbs.is_valid())
self.assertLess(lbs.get_access_cost(self.cost),
lbs_norm.get_access_cost(self.cost))
self.assertAlmostEqual(lbs_norm.get_access_cost(self.cost)
- lbs.get_access_cost(self.cost),
lbs.remote_gbuf_access[de.IFM]
* (self.cost.mem_hier_at(me.DRAM)
- self.cost.mem_hier_at(me.GBUF)))
self.assertAlmostEqual(lbs.access[me.DRAM][de.FIL],
lbs_norm.access[me.DRAM][de.FIL])
self.assertAlmostEqual(lbs.access[me.DRAM][de.IFM], 0)
self.assertAlmostEqual(lbs.access[me.DRAM][de.OFM],
lbs_norm.access[me.DRAM][de.OFM])
self.assertAlmostEqual(lbs.access[me.GBUF][de.IFM],
lbs_norm.access[me.GBUF][de.IFM])
self.assertAlmostEqual(lbs.remote_gbuf_access[de.IFM],
lbs_norm.access[me.DRAM][de.IFM])
lbs = self._lbs(bl_ts, bl_ords, rsrckey='DSTNOTDATA')
self.assertTrue(lbs.is_valid())
self.assertLess(lbs.get_access_cost(self.cost),
lbs_norm.get_access_cost(self.cost))
self.assertAlmostEqual(lbs_norm.get_access_cost(self.cost)
- lbs.get_access_cost(self.cost),
lbs.remote_gbuf_access[de.OFM]
* (self.cost.mem_hier_at(me.DRAM)
- self.cost.mem_hier_at(me.GBUF)))
self.assertAlmostEqual(lbs.access[me.DRAM][de.FIL],
lbs_norm.access[me.DRAM][de.FIL])
self.assertAlmostEqual(lbs.access[me.DRAM][de.IFM],
lbs_norm.access[me.DRAM][de.IFM])
self.assertAlmostEqual(lbs.access[me.DRAM][de.OFM], 0)
self.assertAlmostEqual(lbs.access[me.GBUF][de.OFM],
lbs_norm.access[me.GBUF][de.OFM])
self.assertAlmostEqual(lbs.remote_gbuf_access[de.OFM],
lbs_norm.access[me.DRAM][de.OFM])
lbs = self._lbs(bl_ts, bl_ords, rsrckey='DATALOCAL')
self.assertTrue(lbs.is_valid())
self.assertLess(lbs.get_access_cost(self.cost),
lbs_norm.get_access_cost(self.cost))
self.assertAlmostEqual(lbs.access[me.DRAM][de.FIL],
lbs_norm.access[me.DRAM][de.FIL])
self.assertAlmostEqual(lbs.access[me.DRAM][de.IFM], 0)
self.assertAlmostEqual(lbs.access[me.DRAM][de.OFM], 0)
self.assertAlmostEqual(lbs.access[me.GBUF][de.IFM],
lbs_norm.access[me.GBUF][de.IFM])
self.assertAlmostEqual(lbs.access[me.GBUF][de.OFM],
lbs_norm.access[me.GBUF][de.OFM])
self.assertAlmostEqual(lbs.remote_gbuf_access[de.IFM],
lbs_norm.access[me.DRAM][de.IFM])
self.assertAlmostEqual(lbs.remote_gbuf_access[de.OFM],
lbs_norm.access[me.DRAM][de.OFM])
def test_fil_pinning(self):
    ''' Filter pinning. '''
    # Blocking factors / orders for the scheme under test.
    # NOTE(review): the tuple semantics come from _make_bl_ts — confirm
    # which dimension each (t, 0/1, t) triple controls.
    bl_ts = self._make_bl_ts((1, 0, 1), (1, 0, 1), (0, 1, 1))
    bl_ords = [range(le.NUM) for _ in range(2)]
    # Baseline scheme (no special resource key): filters are fetched
    # normally, so both fetch count and access count for FIL are nonzero.
    lbs_norm = self._lbs(bl_ts, bl_ords)
    self.assertTrue(lbs_norm.is_valid())
    self.assertGreater(lbs_norm.fetch[0][de.FIL], 0)
    self.assertGreater(lbs_norm.get_access()[0][de.FIL], 0)
    # With the 'FILPIN' resource, filters are pinned: no FIL fetches and
    # no FIL accesses at level 0.
    lbs = self._lbs(bl_ts, bl_ords, rsrckey='FILPIN')
    self.assertTrue(lbs.is_valid())
    self.assertEqual(lbs.fetch[0][de.FIL], 0)
    self.assertEqual(lbs.get_access()[0][de.FIL], 0)
| |
#! /usr/bin/env python
"""Helper functions for processing Dakota parameter and results files."""
import collections
import collections.abc
import os
import re
import subprocess

import numpy as np
import yaml
def is_dakota_installed():
    """Check whether Dakota is installed and in the execution path.

    Returns
    -------
    bool
        True if Dakota is callable.
    """
    try:
        subprocess.check_call(["dakota", "--version"])
    except (subprocess.CalledProcessError, OSError):
        # Nonzero exit status, or the executable is missing entirely.
        return False
    return True
def which(prog, env=None):
    """Call the OS `which` function.

    Parameters
    ----------
    prog : str
        The command name.
    env : str, optional
        An environment variable to consult for an override of `prog`.

    Returns
    -------
    The path to the command (as bytes from `check_output`), or None if
    the command is not found.
    """
    # Allow an environment variable (env, or PROG upper-cased) to override.
    prog = os.environ.get(env or prog.upper(), prog)
    try:
        # subprocess.DEVNULL instead of open("/dev/null", "w"): the
        # original leaked the file descriptor on every call.
        prog = subprocess.check_output(
            ["/usr/bin/which", prog], stderr=subprocess.DEVNULL
        ).strip()
    except (subprocess.CalledProcessError, OSError):
        # CalledProcessError: command not found (nonzero exit).
        # OSError: /usr/bin/which itself is missing.
        return None
    return prog
def which_dakota():
    """Locate the Dakota executable.

    Returns
    -------
    The path to the Dakota executable, or None if Dakota is not found.
    """
    dakota_path = which("dakota")
    return dakota_path
def add_dyld_library_path():
    """Add the `DYLD_LIBRARY_PATH` environment variable for Dakota."""
    dakota_exe = which_dakota()
    try:
        # which_dakota() returns None when Dakota is absent; dirname(None)
        # raises TypeError, which we treat as "nothing to do".
        install_root = os.path.dirname(os.path.dirname(dakota_exe))
        paths = [os.path.join(install_root, "bin"),
                 os.path.join(install_root, "lib")]
        os.environ["DYLD_LIBRARY_PATH"] = os.path.pathsep.join(paths)
    except (AttributeError, TypeError):
        return None
def get_response_descriptors(params_file):
    """Extract response descriptors from a Dakota parameters file.

    Parameters
    ----------
    params_file : str
        The path to a Dakota parameters file.

    Returns
    -------
    list
        A list of response descriptors for the Dakota experiment, or
        None if the file cannot be read.
    """
    labels = []
    try:
        with open(params_file, "r") as fp:
            for line in fp:
                # Descriptor lines contain an "ASV_<n>:<label>" token.
                # Raw strings: "\S" in a plain string is an invalid escape
                # sequence on modern Python.
                if re.search(r"ASV_", line):
                    labels.append("".join(re.findall(r":(\S+)", line)))
    except IOError:
        return None
    else:
        return labels
def get_attributes(obj):
    """Get and format the attributes of an object.

    Parameters
    ----------
    obj
        An object that has attributes.

    Returns
    -------
    dict
        The object's attributes, with leading underscores stripped from
        their names.
    """
    return {name.lstrip("_"): value for name, value in obj.__dict__.items()}
def get_configuration_file(params_file):
    """Extract the configuration filepath from a Dakota parameters file.

    Parameters
    ----------
    params_file : str
        The path to a Dakota parameters file.

    Returns
    -------
    str
        The path to the configuration file for the Dakota experiment,
        or None if no "AC_1" line is present.
    """
    with open(params_file, "r") as fp:
        for line in fp:
            # The configuration path precedes the "AC_1" marker.
            if "AC_1" in line:
                prefix, _, _ = line.partition("AC_1")
                return prefix.strip()
def deserialize(config_file):
    """Load settings from a YAML configuration file.

    Parameters
    ----------
    config_file : str
        The path to a YAML configuration file.

    Returns
    -------
    dict
        Configuration settings in a dict.
    """
    with open(config_file, "r") as fp:
        settings = yaml.safe_load(fp)
    return settings
def compute_statistic(statistic, array):
    """Compute the statistic used in a Dakota response function.

    Parameters
    ----------
    statistic : str
        A string with the name of the statistic to compute ('mean',
        'median', etc.). Must name a numpy callable.
    array : array_like
        An array data structure, such as a numpy array.

    Returns
    -------
    float
        The value of the computed statistic.
    """
    # getattr is the idiomatic (and equivalent) form of the original
    # np.__getattribute__(statistic) dynamic lookup.
    return getattr(np, statistic)(array)
def write_results(results_file, values, labels):
    """Write a Dakota results file from a set of input values.

    Parameters
    ----------
    results_file : str
        The path to a Dakota results file.
    values : array_like
        A list or array of numeric values.
    labels : str
        A list of labels to attach to the values.
    """
    # Pair each value with its label, one tab-separated pair per row.
    table = np.column_stack((np.asarray(values), np.asarray(labels)))
    np.savetxt(results_file, table, delimiter="\t", fmt="%s")
def to_iterable(x):
    """Get an iterable version of an input.

    Parameters
    ----------
    x
        Anything.

    Returns
    -------
    If the input isn't iterable, or is a string, then a tuple; else,
    the input.

    Notes
    -----
    Courtesy http://stackoverflow.com/a/6711233/1563298
    """
    # collections.Iterable was removed in Python 3.10; the abc module is
    # the supported home for the ABC.
    if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
        return x
    return (x,)
def configure_parameters(params):
    """Preprocess Dakota parameters prior to committing to a config file.

    Parameters
    ----------
    params : dict
        Configuration parameters for a Dakota experiment that map to the
        items in the Dakota configuration file, **dakota.yaml**.

    Returns
    -------
    (dict, dict)
        An updated dict of Dakota configuration parameters, and a dict
        of substitutions used to create the Dakota template ("dtmpl")
        file.
    """
    # Select the analysis driver based on which of 'component'/'plugin'
    # is present: component -> dakota_run_component, plugin (only) ->
    # dakota_run_plugin, neither -> both blanked.
    try:
        params["component"]
    except KeyError:
        try:
            params["plugin"]
        except KeyError:
            params["component"] = params["plugin"] = ""
        else:
            params["analysis_driver"] = "dakota_run_plugin"
            params["component"] = ""
    else:
        params["analysis_driver"] = "dakota_run_component"
        params["plugin"] = ""
    # Normalize settings that may be given as a bare string into
    # single-item lists; missing keys are left absent.
    to_check = [
        "descriptors",
        "response_descriptors",
        "response_statistics",
        "auxiliary_files",
    ]
    for item in to_check:
        try:
            if isinstance(params[item], str):
                params[item] = [params[item]]
        except KeyError:
            pass
    # Template substitutions: each descriptor maps to a "{descriptor}"
    # placeholder for the dtmpl file.
    # NOTE(review): params["descriptors"] is effectively required here —
    # a missing key raises KeyError; confirm callers always provide it.
    subs = {}
    for item in params["descriptors"]:
        subs[item] = "{" + item + "}"
    try:
        subs["run_duration"] = params["run_duration"]
    except KeyError:
        pass
    return params, subs
| |
from __future__ import absolute_import, print_function
import ast, io, re, os.path, sys, inspect, types, warnings, pickle
from datetime import datetime
from itertools import count as _count
from inspect import isfunction
from time import strptime
from collections import defaultdict
from functools import update_wrapper, wraps
from xml.etree import cElementTree
from copy import deepcopy
import pony
from pony import options
from pony.thirdparty.decorator import decorator as _decorator
if pony.MODE.startswith('GAE-'): localbase = object
else: from threading import local as localbase
class PonyDeprecationWarning(DeprecationWarning):
    # Dedicated warning category so Pony deprecations can be filtered
    # independently of generic DeprecationWarnings.
    pass

def deprecated(stacklevel, message):
    # Emit a Pony deprecation warning attributed to the caller
    # `stacklevel` frames up the stack.
    warnings.warn(message, PonyDeprecationWarning, stacklevel)

# Show each distinct Pony deprecation message only once per run.
warnings.simplefilter('once', PonyDeprecationWarning)
def _improved_decorator(caller, func):
if isfunction(func):
return _decorator(caller, func)
def pony_wrapper(*args, **kwargs):
return caller(func, *args, **kwargs)
return pony_wrapper
def decorator(caller, func=None):
    """Build a decorator from `caller`; may be used directly on a function
    or as a decorator factory when `func` is omitted."""
    if func is not None:
        return _improved_decorator(caller, func)

    def new_decorator(target):
        return _improved_decorator(caller, target)
    # Make the produced decorator look like `caller` itself.
    if isfunction(caller):
        update_wrapper(new_decorator, caller)
    return new_decorator
def decorator_with_params(dec):
    """Allow `dec` to be used both as @dec and as @dec(*args, **kwargs)."""
    def parameterized_decorator(*args, **kwargs):
        bare_usage = len(args) == 1 and isfunction(args[0]) and not kwargs
        if bare_usage:
            return decorator(dec(), args[0])
        return decorator(dec(*args, **kwargs))
    return parameterized_decorator
@decorator
def cut_traceback(func, *args, **kwargs):
    # Hide Pony-internal frames from tracebacks raised out of `func`,
    # unless options.CUT_TRACEBACK is disabled.
    if not options.CUT_TRACEBACK:
        return func(*args, **kwargs)
    try: return func(*args, **kwargs)
    except AssertionError: raise
    except Exception:
        exc_type, exc, tb = sys.exc_info()
        full_tb = tb
        last_pony_tb = None
        try:
            # Walk to the deepest frame, remembering the last frame that
            # belongs to a pony.* module.
            while tb.tb_next:
                module_name = tb.tb_frame.f_globals['__name__']
                if module_name == 'pony' or (module_name is not None  # may be None during import
                                             and module_name.startswith('pony.')):
                    last_pony_tb = tb
                tb = tb.tb_next
            if last_pony_tb is None: raise
            module_name = tb.tb_frame.f_globals.get('__name__') or ''
            # If the error was raised via pony.utils.throw(), cut the
            # traceback at the last Pony frame; otherwise keep it whole.
            if module_name.startswith('pony.utils') and tb.tb_frame.f_code.co_name == 'throw':
                reraise(exc_type, exc, last_pony_tb)
            reraise(exc_type, exc, full_tb)
        finally:
            # Break reference cycles through the traceback objects.
            del exc, full_tb, tb, last_pony_tb
# Number of wrapper frames cut_traceback adds in interactive mode.
cut_traceback_depth = 2

if pony.MODE != 'INTERACTIVE':
    # Outside interactive mode traceback cutting is disabled entirely:
    # replace the decorator above with a no-op pass-through.
    cut_traceback_depth = 0
    def cut_traceback(func):
        return func
def reraise(exc_type, exc, tb):
    """Re-raise `exc` with traceback `tb`, dropping local references
    afterwards to avoid reference cycles."""
    try:
        raise exc.with_traceback(tb)
    finally:
        del exc, tb
def throw(exc_type, *args, **kwargs):
    """Raise `exc_type(*args, **kwargs)` (or `exc_type` itself when it is
    already an exception instance), clearing __cause__."""
    if isinstance(exc_type, Exception):
        assert not args and not kwargs
        exc = exc_type
    else:
        exc = exc_type(*args, **kwargs)
    exc.__cause__ = None
    try:
        if pony.MODE == 'INTERACTIVE' and options.CUT_TRACEBACK:
            raise exc  # Set "pony.options.CUT_TRACEBACK = False" to see full traceback
        raise exc
    finally:
        del exc
def truncate_repr(s, max_len=100):
    """Return repr(s) shortened to at most `max_len` characters,
    ending with '...' when truncated."""
    text = repr(s)
    if len(text) <= max_len:
        return text
    return text[:max_len - 3] + '...'
# Registry keeping referenced code objects alive so their id() values
# remain unique for the lifetime of the process.
codeobjects = {}

def get_codeobject_id(codeobject):
    """Return id(codeobject), registering the object so its id is not
    reused by the allocator."""
    key = id(codeobject)
    codeobjects.setdefault(key, codeobject)
    return key
# Cache: code-object id (or ast.Lambda node) -> list of argument names.
lambda_args_cache = {}
def get_lambda_args(func):
    # Return the positional argument names of a plain function or an
    # ast.Lambda node. *args, **kwargs, defaults, positional-only and
    # keyword-only arguments are all rejected via throw().
    if type(func) is types.FunctionType:
        codeobject = func.__code__
        cache_key = get_codeobject_id(codeobject)
    elif isinstance(func, ast.Lambda):
        cache_key = func
    else: assert False  # pragma: no cover
    names = lambda_args_cache.get(cache_key)
    if names is not None: return names
    if type(func) is types.FunctionType:
        if hasattr(inspect, 'signature'):
            names, argsname, kwname, defaults = [], None, None, []
            for p in inspect.signature(func).parameters.values():
                if p.default is not p.empty:
                    defaults.append(p.default)
                if p.kind == p.POSITIONAL_OR_KEYWORD:
                    names.append(p.name)
                elif p.kind == p.VAR_POSITIONAL:
                    argsname = p.name
                elif p.kind == p.VAR_KEYWORD:
                    kwname = p.name
                elif p.kind == p.POSITIONAL_ONLY:
                    throw(TypeError, 'Positional-only arguments like %s are not supported' % p.name)
                elif p.kind == p.KEYWORD_ONLY:
                    throw(TypeError, 'Keyword-only arguments like %s are not supported' % p.name)
                else: assert False
        else:
            # Legacy interpreters without inspect.signature.
            names, argsname, kwname, defaults = inspect.getargspec(func)
    elif isinstance(func, ast.Lambda):
        # AST path: pull names straight off the node.
        argsname = func.args.vararg
        kwname = func.args.kwarg
        defaults = func.args.defaults + func.args.kw_defaults
        names = [arg.arg for arg in func.args.args]
    else: assert False  # pragma: no cover
    if argsname: throw(TypeError, '*%s is not supported' % argsname)
    if kwname: throw(TypeError, '**%s is not supported' % kwname)
    if defaults: throw(TypeError, 'Defaults are not supported')
    lambda_args_cache[cache_key] = names
    return names
def error_method(*args, **kwargs):
    """Placeholder method slot: always raises TypeError."""
    raise TypeError()
_ident_re = re.compile(r'^[A-Za-z_]\w*\Z')

# is_ident = ident_re.match
def is_ident(string):
    'is_ident(string) -> bool'
    return bool(_ident_re.match(string))

_name_parts_re = re.compile(r'''
    [A-Z][A-Z0-9]+(?![a-z])  # ACRONYM
    | [A-Z][a-z]*            # Capitalized or single capital
    | [a-z]+                 # all-lowercase
    | [0-9]+                 # numbers
    | _+                     # underscores
    ''', re.VERBOSE)

def split_name(name):
    "split_name('Some_FUNNYName') -> ['Some', 'FUNNY', 'Name']"
    if not _ident_re.match(name):
        raise ValueError('Name is not correct Python identifier')
    # `parts` instead of the original `list`, which shadowed the builtin.
    parts = _name_parts_re.findall(name)
    if not (parts[0].strip('_') and parts[-1].strip('_')):
        # Grammar fix of the original message ("must not starting or ending").
        raise ValueError('Name must not start or end with underscores')
    return [s for s in parts if s.strip('_')]
def uppercase_name(name):
    "uppercase_name('Some_FUNNYName') -> 'SOME_FUNNY_NAME'"
    parts = split_name(name)
    return '_'.join(part.upper() for part in parts)
def lowercase_name(name):
    "lowercase_name('Some_FUNNYName') -> 'some_funny_name'"
    return '_'.join(s.lower() for s in split_name(name))
def camelcase_name(name):
    "camelcase_name('Some_FUNNYName') -> 'SomeFunnyName'"
    return ''.join(s.capitalize() for s in split_name(name))
def mixedcase_name(name):
    "mixedcase_name('Some_FUNNYName') -> 'someFunnyName'"
    # `parts` rather than the original local `list` (shadowed the builtin).
    parts = split_name(name)
    head, tail = parts[0], parts[1:]
    return head.lower() + ''.join(word.capitalize() for word in tail)
def import_module(name):
    "import_module('a.b.c') -> <module a.b.c>"
    cached = sys.modules.get(name)
    if cached is not None:
        return cached
    # __import__ returns the top-level package; walk down to the leaf.
    mod = __import__(name)
    for part in name.split('.')[1:]:
        mod = getattr(mod, part)
    return mod
# Platform-specific pattern for absolute paths: on Windows an optional
# drive letter followed by a slash/backslash, elsewhere a leading '/'.
if sys.platform == 'win32':
    _absolute_re = re.compile(r'^(?:[A-Za-z]:)?[\\/]')
else:
    _absolute_re = re.compile(r'^/')

def is_absolute_path(filename):
    """Return True when `filename` is absolute for the current platform."""
    return _absolute_re.match(filename) is not None
def absolutize_path(filename, frame_depth):
    # Resolve `filename` relative to the source file of the caller
    # `frame_depth` frames up the stack; absolute paths pass through.
    if is_absolute_path(filename): return filename
    code_filename = sys._getframe(frame_depth+1).f_code.co_filename
    if not is_absolute_path(code_filename):
        # e.g. '<stdin>' in the REPL — there is no directory to resolve
        # against, so we can only fail.
        if code_filename.startswith('<') and code_filename.endswith('>'):
            if pony.MODE == 'INTERACTIVE': raise ValueError(
                'When in interactive mode, please provide absolute file path. Got: %r' % filename)
        raise EnvironmentError('Unexpected module filename, which is not absolute file path: %r' % code_filename)
    code_path = os.path.dirname(code_filename)
    return os.path.join(code_path, filename)
def current_timestamp():
    """Return the current local time as a timestamp string."""
    now = datetime.now()
    return datetime2timestamp(now)
def datetime2timestamp(d):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS.ffffff'."""
    text = d.isoformat(' ')
    # isoformat omits microseconds when they are zero (19 chars); pad.
    return text + '.000000' if len(text) == 19 else text
def timestamp2datetime(t):
    """Parse 'YYYY-MM-DD HH:MM:SS[.ffffff]' back into a datetime."""
    fields = strptime(t[:19], '%Y-%m-%d %H:%M:%S')
    # Characters 20..25 hold the (possibly truncated) microseconds.
    micro = int((t[20:26] + '000000')[:6])
    year, month, day, hour, minute, second = fields[:6]
    return datetime(year, month, day, hour, minute, second, micro)
# Lexing helpers for parse_expr: the leading token of an expression ...
expr1_re = re.compile(r'''
    ([A-Za-z_]\w*) # identifier (group 1)
    | ([(]) # open parenthesis (group 2)
    ''', re.VERBOSE)
# ... a continuation after it ...
expr2_re = re.compile(r'''
    \s*(?:
    (;) # semicolon (group 1)
    | (\.\s*[A-Za-z_]\w*) # dot + identifier (group 2)
    | ([([]) # open parenthesis or braces (group 3)
    )
    ''', re.VERBOSE)
# ... and tokens relevant while skipping a bracketed section (brackets
# plus all string-literal forms, so brackets inside strings are ignored).
expr3_re = re.compile(r"""
    [()[\]] # parenthesis or braces (group 1)
    | '''(?:[^\\]|\\.)*?''' # '''triple-quoted string'''
    | \"""(?:[^\\]|\\.)*?\""" # \"""triple-quoted string\"""
    | '(?:[^'\\]|\\.)*?' # 'string'
    | "(?:[^"\\]|\\.)*?" # "string"
    """, re.VERBOSE)
def parse_expr(s, pos=0):
    # Scan one Python-like expression starting at s[pos].
    # NOTE(review): the second element of the returned tuple appears to
    # flag a particular bracket structure (z is bumped on '[' and on
    # closing a section) — confirm intended semantics against callers.
    z = 0
    match = expr1_re.match(s, pos)
    if match is None: raise ValueError()
    start = pos
    i = match.lastindex
    if i == 1: pos = match.end()  # identifier
    elif i == 2: z = 2  # "("
    else: assert False  # pragma: no cover
    while True:
        match = expr2_re.match(s, pos)
        if match is None: return s[start:pos], z==1
        pos = match.end()
        i = match.lastindex
        if i == 1: return s[start:pos], False  # ";" - explicit end of expression
        elif i == 2: z = 2  # .identifier
        elif i == 3:  # "(" or "["
            pos = match.end()
            counter = 1
            open = match.group(i)
            if open == '(': close = ')'
            elif open == '[': close = ']'; z = 2
            else: assert False  # pragma: no cover
            # Skip ahead to the matching close bracket, ignoring any
            # brackets that appear inside string literals.
            while True:
                match = expr3_re.search(s, pos)
                if match is None: raise ValueError()
                pos = match.end()
                x = match.group()
                if x == open: counter += 1
                elif x == close:
                    counter -= 1
                    if not counter: z += 1; break
        else: assert False  # pragma: no cover
def tostring(x):
    """Best-effort conversion of any object to a string: str() first,
    XML elements via ElementTree, then repr(), then a synthetic tag."""
    if isinstance(x, str):
        return x
    if hasattr(x, '__unicode__'):
        try:
            return str(x)
        except Exception:
            pass
    # XML element (duck-typed on makeelement).
    if hasattr(x, 'makeelement'):
        return cElementTree.tostring(x)
    try:
        return str(x)
    except Exception:
        pass
    try:
        return repr(x)
    except Exception:
        pass
    # Last resort when both str() and repr() fail. The original passed a
    # single argument to a two-spec format string (TypeError) and also
    # checked types.InstanceType, which no longer exists on Python 3.
    return '<%s object at 0x%X>' % (x.__class__.__name__, id(x))
def strjoin(sep, strings, source_encoding='ascii', dest_encoding=None):
    "Can join mix of str and byte strings in different encodings"
    strings = list(strings)
    try:
        return sep.join(strings)
    except (UnicodeDecodeError, TypeError):
        # On Python 3, joining bytes into a str raises TypeError (not
        # UnicodeDecodeError); fall through and decode the byte items.
        pass
    for i, s in enumerate(strings):
        # The original tested isinstance(s, str) and called .decode(),
        # which str does not have on Python 3 — bytes is the type that
        # needs decoding.
        if isinstance(s, bytes):
            strings[i] = s.decode(source_encoding, 'replace').replace(u'\ufffd', '?')
    result = sep.join(strings)
    if dest_encoding is None:
        return result
    return result.encode(dest_encoding, 'replace')
def count(*args, **kwargs):
    """Hybrid count(): with a single argument, use its .count() method,
    count distinct items of an iterable, or fall back to itertools.count."""
    if kwargs or len(args) != 1:
        return _count(*args, **kwargs)
    arg = args[0]
    counter = getattr(arg, 'count', None)
    if counter is not None:
        return counter()
    try:
        it = iter(arg)
    except TypeError:
        # Not iterable: treat as the start value of an infinite counter.
        return _count(arg)
    return len(set(it))
def avg(iter):
    """Average of the non-None elements; None when there are none."""
    total = 0.0
    n = 0
    for elem in iter:
        if elem is not None:
            total += elem
            n += 1
    return total / n if n else None
def group_concat(items, sep=','):
    """Join stringified items with `sep`; None input yields None."""
    if items is None:
        return None
    separator = str(sep)
    return separator.join(str(item) for item in items)
def coalesce(*args):
    """Return the first argument that is not None, or None."""
    return next((arg for arg in args if arg is not None), None)
def distinct(iter):
    """Return a defaultdict(int) mapping each item to its frequency."""
    frequencies = defaultdict(int)
    for item in iter:
        frequencies[item] += 1
    return frequencies
def concat(*args):
    """Concatenate the string forms of all arguments."""
    return ''.join(map(tostring, args))
def between(x, a, b):
    """True when a <= x <= b (both bounds inclusive)."""
    return a <= x and x <= b
def is_utf8(encoding):
    """True when `encoding` names UTF-8 (ignoring case, '-' and '_')."""
    normalized = encoding.upper().replace('_', '').replace('-', '')
    return normalized in ('UTF8', 'UTF', 'U8')
def _persistent_id(obj):
if obj is Ellipsis:
return "Ellipsis"
def _persistent_load(persid):
if persid == "Ellipsis":
return Ellipsis
raise pickle.UnpicklingError("unsupported persistent object")
def pickle_ast(val):
    """Serialize an AST into a BytesIO, tagging Ellipsis via persistent id."""
    buffer = io.BytesIO()
    writer = pickle.Pickler(buffer)
    writer.persistent_id = _persistent_id
    writer.dump(val)
    return buffer
def unpickle_ast(pickled):
    """Inverse of pickle_ast: restore an AST from its BytesIO pickle."""
    pickled.seek(0)
    loader = pickle.Unpickler(pickled)
    loader.persistent_load = _persistent_load
    return loader.load()
def copy_ast(tree):
    """Deep-copy an AST by round-tripping it through pickle."""
    serialized = pickle_ast(tree)
    return unpickle_ast(serialized)
def _hashable_wrap(func):
@wraps(func, assigned=('__name__', '__doc__'))
def new_func(self, *args, **kwargs):
if getattr(self, '_hash', None) is not None:
assert False, 'Cannot mutate HashableDict instance after the hash value is calculated'
return func(self, *args, **kwargs)
return new_func
class HashableDict(dict):
    """A dict that becomes effectively immutable once hashed: the hash is
    cached in `_hash` and every mutating method asserts it is unset."""
    def __hash__(self):
        cached = getattr(self, '_hash', None)
        if cached is None:
            cached = 0
            # XOR-fold keys and values so the hash is order-independent.
            for key, value in self.items():
                cached ^= hash(key) ^ hash(value)
            self._hash = cached
        return cached
    def __deepcopy__(self, memo):
        # Once hashed the dict is frozen, so sharing the instance is safe.
        if getattr(self, '_hash', None) is not None:
            return self
        copied = {deepcopy(key, memo): deepcopy(value, memo)
                  for key, value in self.items()}
        return HashableDict(copied)
    __setitem__ = _hashable_wrap(dict.__setitem__)
    __delitem__ = _hashable_wrap(dict.__delitem__)
    clear = _hashable_wrap(dict.clear)
    pop = _hashable_wrap(dict.pop)
    popitem = _hashable_wrap(dict.popitem)
    setdefault = _hashable_wrap(dict.setdefault)
    update = _hashable_wrap(dict.update)
def deref_proxy(value):
    """Unwrap Flask LocalProxy / Pony EntityProxy wrappers; any other
    value is returned unchanged."""
    cls = type(value)
    type_name = cls.__name__
    if type_name == 'LocalProxy' and '_get_current_object' in cls.__dict__:
        # Flask local proxy
        return value._get_current_object()
    if type_name == 'EntityProxy':
        # Pony proxy
        return value._get_object()
    return value
def deduplicate(value, deduplication_cache):
    """Return a canonical shared instance of `value` from the per-type
    cache; on any cache miss condition the value itself is returned.

    The original used a bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit; only the two expected failures are
    caught now.
    """
    cls = type(value)
    try:
        return deduplication_cache[cls].setdefault(value, value)
    except (KeyError, TypeError):
        # KeyError: no cache bucket for this type yet.
        # TypeError: value is unhashable and cannot be a dict key.
        return value
| |
__author__ = 'yyekovenko'
from base_rest_helper import *
class InstanceHelper(BaseRESTHelper):
    # REST helper for instance lifecycle operations (create, terminate,
    # show, pause, suspend, reboot, rename, snapshot, log) against the
    # Asgard API, driven through a shared Utils instance.
    def __init__(self, utils, auth):
        """
        Arguments:
        - utils: instance of Utils class
        - auth: instance of AuthenticationHelper class
        """
        super(InstanceHelper, self).__init__(utils)
        self.auth = auth
    def create_instance(self, parameters=None, attempts=4):
        """
        Create instance via Asgard REST API.
        Arguments:
        - parameters: dict, parameters of instance (see Wiki for details).
        - attempts: int, number of creation retries before giving up.
        Return:
        - dict, parameters of just created instance or False if creation failed.
        """
        # this nested func is used as a condition to complete waiting (see usage below)
        def find_new_instance():
            new_ins = [i for i in self.utils.get_list('instances')
                       if i['name'] == params['name'] and i['status'] != 'Build']
            if len(new_ins) == 1:
                return new_ins[0]
            else:
                return False
        # The dict enumerates only required parameters of create_instance request
        params = {
            # DETAILS
            'instanceSources': 'IMAGE', # 'IMAGE' for image, 'SNAPSHOT' for snapshot
            'image': '', # existing value should be found
            'name': '', # generate random value
            'flavor': '', # existing value should be found
            'datacenter': '', # existing value should be found
            'count': 1,
            # ACCESS AND SECURITY
            'keypair': '', # existing value should be found
            'securityGroups': [''], # existing value should be found
            # VOLUME OPTIONS
            'volumeOptions': 'NOT_BOOT', # 'Do not boot from volume' item
            'deviceName': '' # required, empty because not used
            #'snapshot': '', # optional
            #'deleteOnTerminate': False, # bool, optional
            # POST-CREATION
            #'customizationScript': '' # optional
        }
        # apply non-empty user-defined parameters
        if parameters is not None:
            for k in parameters:
                if parameters[k] != "":
                    params[k] = parameters[k]
        # find and insert values for all required params that are still empty.
        if params['name'] == "":
            # generate a random name that does not collide with existing ones
            instances = self.utils.get_list('instances')
            params['name'] = self.utils.generate_string(4, *[i['name'] for i in instances])
        if params['image'] == "":
            # pick the first active image; for-else raises if none found
            for image in self.utils.get_list('images'):
                if image['status'] == 'active':
                    params['image'] = image['id']
                    break
            else:
                raise AssertionError("Unable to find active image for instance creation.")
        if params['keypair'] == "":
            pairslist = self.utils.get_list('keypairs')
            ok_(len(pairslist) > 0, "Unable to find any keypair for instance creation.")
            params['keypair'] = pairslist[0]['name']
        if params['securityGroups'] == ['']:
            groups = self.utils.get_list('securitygroups')
            ok_(len(groups) > 0, "Unable to find sec.group for instance creation.")
            params['securityGroups'] = [groups[0]['name']]
        if params['flavor'] == "":
            # pick the flavor with the smallest positive disk size
            flavors = self.utils.get_list('flavors')
            mindisk = min([int(i['disk']) for i in flavors if int(i['disk']) > 0])
            for f in flavors:
                if int(f['disk']) == mindisk:
                    params['flavor'] = f['id']
                    break
        if params['datacenter'] == "":
            dc = self.auth.get_datacenters()
            ok_(len(dc) > 0, "Unable to find any datacenter for instance creation.")
            params['datacenter'] = dc[0]
        # launch instance creation and verify result.
        # multiple attempts to create instance - to avoid false-negative test results.
        for attempt in range(attempts):
            print("\n=== Instance creation. Attempt # %s. ===\n" % attempt)
            # try to create instance. wait for result.
            self.utils.send_request("POST", 'create_instance', data=params)
            # poll up to 180s (every 5s) until the instance leaves 'Build'
            instance_created = self.utils.waitfor(find_new_instance, 180, 5)
            if instance_created:
                instance = find_new_instance()
            else:
                print('Timeout for instance creation expired.')
            if not instance_created or instance['status'] != 'Active': # status can be 'Active' or 'Error'
                print("\nAttempt #" + str(attempt) + " to create 'active' instance failed." +
                      "\nThe following instances exist at the moment:\n" +
                      '\n'.join(i['name'] + ': ' + i['status'] for i in self.utils.get_list('instances')) +
                      "\nCleaning up...")
                # If unable to create instance, remove all instances except two ones.
                # If lab limit for instances is changed, modify remain value accordingly.
                self.utils.cleanup_objects(self.terminate_instances, 'instances', id_key='instanceId', remain=2)
            else:
                print('Instance created successfully.')
                return instance
        ok_(False, "Unable to create instance with 'Active' state or timeout expired.")
    def terminate_instances(self, list_of_ids):
        """
        Arguments:
        - list_of_ids: list of strings - instance ids.
        Return:
        - bool: True if success.
        """
        params = {'selectedInstances': list_of_ids}
        res = self.utils.send_request('POST', 'terminate_instances', data=params)
        # make sure the instances are not in the list anymore.
        condition = lambda: len([i for i in self.utils.get_list('instances') if i['instanceId'] in list_of_ids]) == 0
        return self.utils.waitfor(condition, 180, 3)
    def show_instance(self, id):
        """
        Arguments:
        - id: string
        Return:
        - JSON dict with instance parameters
        """
        params = {'id': id}
        res = self.utils.send_request('GET', 'show_instance', data=params)
        return json.loads(res.content)['instance']
    def pause_unpause(self, ins_id, do_pause=True):
        """
        Arguments:
        - ins_id: string
        - do_pause: bool, True to pause, False to unpause instance.
        Return:
        - JSON structure from response content field.
        """
        # 'pause_instance' or 'unpause_instance' depending on do_pause
        action = 'un' * (not do_pause) + 'pause_instance'
        params = {'instanceId': ins_id}
        res = self.utils.send_request('GET', action, data=params)
        return json.loads(res.content)
    def suspend_resume(self, ins_id, do_suspend=True):
        """
        Arguments:
        - ins_id: string
        - do_suspend: bool, True to suspend, False to resume instance.
        Return:
        - JSON structure from response content field.
        """
        action = 'suspend_instance' if do_suspend else 'resume_instance'
        params = {'instanceId': ins_id}
        res = self.utils.send_request('GET', action, data=params)
        return json.loads(res.content)
    def reboot_instance(self, ins_id):
        """
        Arguments:
        - ins_id: string
        Return:
        - JSON structure from response content field.
        """
        params = {'instanceId': ins_id}
        res = self.utils.send_request('POST', 'reboot_instance', data=params)
        return json.loads(res.content)
    def update_instance(self, ins_id, new_name):
        """
        Arguments:
        - ins_id: string
        - new_name: str, new name for instance with ins_id.
        Return:
        - JSON structure from response content field.
        """
        params = {'id': ins_id, 'name': new_name}
        res = self.utils.send_request('POST', 'update_instance', data=params)
        return json.loads(res.content)['resp']['server']
    def make_snapshot(self, ins_id, snap_name):
        """
        Create snapshot for instance.
        Arguments:
        - ins_id: string
        - snap_name: string
        Return:
        - JSON structure from response content field.
        """
        params = {'id': ins_id, 'name': snap_name}
        res = self.utils.send_request('POST', 'make_snapshot', data=params)
        return json.loads(res.content)
    def show_log(self, ins_id, showall=None):
        """
        Arguments:
        - ins_id: string
        - showall: string, optional, number of lines to be shown.
        Return:
        - JSON structure from response content field.
        """
        params = {'instanceId': ins_id, 'showAll': showall}
        res = self.utils.send_request('POST', 'show_log', data=params)
        return json.loads(res.content)
class ImageHelper(BaseRESTHelper):
    # REST helper for image and instance-snapshot operations via Asgard.
    def delete_image(self, id):
        """ Delete image or instance snapshot. """
        params = {'id': id}
        res = self.utils.send_request('POST', 'delete_image', data=params)
        # make sure the image is not in the list anymore.
        def check_image_deleted():
            # collect images and snapshots
            img_snp = self.utils.get_list('images') + self.utils.get_list('instance_snapshots')
            if len([i for i in img_snp if i['id'] == id]) == 0:
                return True
        return self.utils.waitfor(check_image_deleted, 180, 2)
    def create_image(self, parameters=None):
        """
        For now the method creates image on base of fake URL.
        If 'real' image is needed - use create_image_via_cli() method.
        """
        params = {
            'name': '',
            'location': "https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img",
            #'location': 'http://172.18.94.4/cirros2.img',
            'diskFormat': 'qcow2',
            'minDisk': 1,
            'minRam': 128,
            'shared': 'on'
        }
        # apply non-empty user-defined parameters over the defaults
        if parameters is not None:
            for k in parameters:
                if parameters[k] != "":
                    params[k] = parameters[k]
        if params['name'] == '':
            # generate a name that does not collide with existing images
            images = self.utils.get_list('images')
            params['name'] = self.utils.generate_string(4, *[i['name'] for i in images])
        res = self.utils.send_request("POST", 'create_image', data=params)
        # return new image or raise exception if not created.
        def find_new_image():
            new_img = [i for i in self.utils.get_list('images')
                       if i['name'] == params['name'] and i['status'] == 'active']
            if len(new_img) == 1:
                return new_img[0]
            else:
                return False
        ok_(self.utils.waitfor(find_new_image, 180, 2), "Creation of image with 'active' status failed.")
        return find_new_image()
    def create_image_via_cli(self, name):
        """
        Create image via glance CLI (it's not part of Asgard functionality).
        Method expects to find image in the root directory.
        Return:
        - new image id or False if creation failed.
        """
        command = ("glance image-create --disk-format=qcow2 "
                   "--container-format=bare --is-public=yes "
                   "--name %s < /root/cirros-0.3.0-i386-disk.img" % name)
        res = self.utils.run_ssh_cmd(command)
        # parse the CLI's table output looking for the 'id' row
        for s in res:
            parsed = s.replace('|', ' ').split()
            if 'id' in parsed:
                return parsed[1]
        return False
    def show_image(self, id):
        # Fetch a single image's parameters by id.
        params = {'id': id}
        res = self.utils.send_request('GET', 'show_image', data=params)
        return json.loads(res.content)['image']
    def update_image(self, params):
        """
        Arguments:
        params dict contains the following keys:
        - id (string, required).
        - name (string, required) - new or old one.
        - shared: optional, 'on' or 'off'.
        Return:
        - JSON structure from response content field.
        """
        res = self.utils.send_request('POST', "update_image", data=params)
        return json.loads(res.content)['image']
class FlavorHelper(BaseRESTHelper):
    """REST helper for flavor management via the Asgard API."""
    def create_flavor(self, parameters=None):
        """Create a flavor; returns the created flavor dict from the
        response. Non-empty entries of `parameters` override defaults."""
        params = {
            'name': '',
            'ram': 128,
            'disk': 1,
            'vcpus': 2,
            'isPublic': 'on', # 'on' or 'off'
            'ephemeral': 1, # optional
            'swap': 1, # optional
            'rxtxFactor': 1 # optional
        }
        if parameters is not None:
            params.update(
                (key, value) for key, value in parameters.items()
                if value != "")
        if params['name'] == '':
            # Generate a name that does not collide with existing flavors.
            existing = self.utils.get_list('flavors')
            taken = [flavor['name'] for flavor in existing]
            params['name'] = self.utils.generate_string(4, *taken)
        res = self.utils.send_request("POST", 'create_flavor', data=params)
        return json.loads(res.content)['resp']['flavor']
    def delete_flavor(self, id):
        """Delete the flavor with the given id; returns the parsed JSON
        response."""
        payload = {'flavorId': id}
        res = self.utils.send_request('POST', 'delete_flavor', data=payload)
        return json.loads(res.content)
| |
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import operator
from functools import reduce
from django.db import models
from django.utils.encoding import force_text
from rest_framework.compat import distinct
from rest_framework.filters import (OrderingFilter as BaseOrderingFilter,
SearchFilter as BaseSearchFilter)
from .compat import six
class SearchFilter(BaseSearchFilter):
    # DRF SearchFilter variant that silently drops configured search
    # fields which do not exist on the queryset's model, instead of
    # producing invalid ORM lookups.
    @staticmethod
    def get_valid_fields(queryset, view, context=None):
        #pylint:disable=protected-access,unused-argument
        # Intersect the view's search_fields with the model's real fields.
        model_fields = {
            field.name for field in queryset.model._meta.get_fields()}
        base_fields = getattr(view, 'search_fields', [])
        valid_fields = tuple([
            field for field in base_fields if field in model_fields])
        return valid_fields
    def filter_queryset(self, request, queryset, view):
        search_fields = self.get_valid_fields(queryset, view)
        search_terms = self.get_search_terms(request)
        if not search_fields or not search_terms:
            return queryset
        orm_lookups = [
            self.construct_search(six.text_type(search_field))
            for search_field in search_fields
        ]
        base = queryset
        conditions = []
        # Each term must match at least one field (OR over fields),
        # and all terms must match (AND over terms).
        for search_term in search_terms:
            queries = [
                models.Q(**{orm_lookup: search_term})
                for orm_lookup in orm_lookups
            ]
            conditions.append(reduce(operator.or_, queries))
        queryset = queryset.filter(reduce(operator.and_, conditions))
        if self.must_call_distinct(queryset, search_fields):
            # Filtering against a many-to-many field requires us to
            # call queryset.distinct() in order to avoid duplicate items
            # in the resulting queryset.
            # We try to avoid this if possible, for performance reasons.
            queryset = distinct(queryset, base)
        return queryset
    def get_schema_operation_parameters(self, view):
        # Describe the search query parameter for OpenAPI schema generation.
        search_fields = getattr(view, 'search_fields', [])
        search_fields_description = "search for matching text in %s" % (
            ', '.join(search_fields))
        return [
            {
                'name': self.search_param,
                'required': False,
                'in': 'query',
                'description': force_text(search_fields_description),
                'schema': {
                    'type': 'string',
                },
            },
        ]
class OrderingFilter(BaseOrderingFilter):
    # DRF OrderingFilter variant that tolerates ordering fields missing
    # from the model and maps 'created_at' onto 'date_joined' for models
    # (e.g. User) that use the latter name.
    def get_valid_fields(self, queryset, view, context=None):
        #pylint:disable=protected-access
        # Keep only (field, label) pairs whose field exists on the model.
        model_fields = {
            field.name for field in queryset.model._meta.get_fields()}
        base_fields = super(OrderingFilter, self).get_valid_fields(
            queryset, view, context=context if context else {})
        valid_fields = tuple([
            field for field in base_fields if field[0] in model_fields])
        return valid_fields
    def get_ordering(self, request, queryset, view):
        #pylint:disable=protected-access
        ordering = None
        params = request.query_params.get(self.ordering_param)
        if params:
            fields = [param.strip() for param in params.split(',')]
            # Translate created_at/-created_at to date_joined/-date_joined
            # when the model has a date_joined field instead.
            if 'created_at' in fields or '-created_at' in fields:
                model_fields = {
                    field.name for field in queryset.model._meta.get_fields()}
                if 'date_joined' in model_fields:
                    fields = ['date_joined' if field == 'created_at' else (
                        '-date_joined' if field == '-created_at' else field)
                        for field in fields]
            ordering = self.remove_invalid_fields(
                queryset, fields, view, request)
        if not ordering:
            # We use an alternate ordering if the fields are not present
            # in the second model.
            # (ex: Organization.full_name vs. User.first_name)
            ordering = self.remove_invalid_fields(
                queryset, self.get_default_ordering(view), view, request)
        if not ordering:
            # NOTE(review): assumes the view defines `alternate_ordering`;
            # confirm all views using this filter provide it.
            ordering = view.alternate_ordering
        return ordering
    def get_schema_operation_parameters(self, view):
        # validating presence of coreapi and coreschema
        super(OrderingFilter, self).get_schema_fields(view)
        ordering_fields = getattr(view, 'ordering_fields', [])
        sort_fields_description = "sort by %s. If a field is preceded by"\
            " a minus sign ('-'), the order will be reversed. Multiple 'o'"\
            " parameters can be specified to produce a stable"\
            " result." % ', '.join([field[1] for field in ordering_fields])
        return [
            {
                'name': self.ordering_param,
                'required': False,
                'in': 'query',
                'description': force_text(sort_fields_description),
                'schema': {
                    'type': 'string',
                },
            },
        ]
class SortableSearchableFilterBackend(object):
    """Filter backend that documents both search and sort query parameters.

    Instances double as backend factories: frameworks instantiate filter
    backends by calling the class, so ``__call__`` returns ``self`` to let
    a pre-configured instance stand in where a class is expected.

    NOTE(review): ``search_param`` and ``ordering_param`` are not defined
    on this class; they are presumably provided by a subclass or mixin —
    confirm before calling ``get_schema_operation_parameters``.
    """

    def __init__(self, sort_fields, search_fields):
        # (field, label) pairs available for ordering.
        self.sort_fields = sort_fields
        # Model field names available for text search.
        self.search_fields = search_fields

    def __call__(self):
        # Already configured; hand back the same object.
        return self

    def filter_queryset(self, request, queryset, view):
        #pylint:disable=no-self-use,unused-argument
        # This backend only contributes schema documentation; it never
        # narrows the queryset.
        return queryset

    def get_schema_operation_parameters(self, view):
        """Return OpenAPI descriptions for the search and sort parameters."""
        search_fields = getattr(view, 'search_fields', [])
        search_fields_description = "search for matching text in %s" % (
            ', '.join(search_fields))
        ordering_fields = getattr(view, 'ordering_fields', [])
        # Fixed: the original lacked the space before "a minus sign",
        # producing "preceded bya minus sign" (compare the identical
        # description in OrderingFilter.get_schema_operation_parameters).
        sort_fields_description = "sort by %s. If a field is preceded by"\
            " a minus sign ('-'), the order will be reversed. Multiple 'o'"\
            " parameters can be specified to produce a stable"\
            " result." % ', '.join([field[1] for field in ordering_fields])
        return [
            {
                'name': self.search_param,
                'required': False,
                'in': 'query',
                'description': force_text(search_fields_description),
                'schema': {
                    'type': 'string',
                },
            },
            {
                'name': self.ordering_param,
                'required': False,
                'in': 'query',
                'description': force_text(sort_fields_description),
                'schema': {
                    'type': 'string',
                },
            }
        ]
class SortableDateRangeSearchableFilterBackend(SortableSearchableFilterBackend):
    """Sortable/searchable backend that also documents the ``start_at``
    and ``ends_at`` date-range query parameters.

    (The commented-out pass-through constructor that used to live here was
    dead code and has been removed; the inherited one is identical.)
    """

    def get_schema_operation_parameters(self, view):
        """Extend the parent's schema parameters with the creation
        date-range bounds."""
        fields = super(SortableDateRangeSearchableFilterBackend,
            self).get_schema_operation_parameters(view)
        fields += [
            {
                'name': 'start_at',
                'required': False,
                'in': 'query',
                'description': force_text("date/time in ISO format"\
                    " after which records were created."),
                'schema': {
                    'type': 'string',
                },
            },
            {
                'name': 'ends_at',
                'required': False,
                'in': 'query',
                'description': force_text("date/time in ISO format"\
                    " before which records were created."),
                'schema': {
                    'type': 'string',
                },
            }
        ]
        return fields
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.vision.v1p1beta1 ImageAnnotator API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.vision_v1p1beta1.gapic import enums
from google.cloud.vision_v1p1beta1.gapic import image_annotator_client_config
from google.cloud.vision_v1p1beta1.gapic.transports import (
image_annotator_grpc_transport,
)
from google.cloud.vision_v1p1beta1.proto import image_annotator_pb2
from google.cloud.vision_v1p1beta1.proto import image_annotator_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-vision").version
class ImageAnnotatorClient(object):
    """
    Service that performs Google Cloud Vision API detection tasks over client
    images, such as face, landmark, logo, label, and text detection. The
    ImageAnnotator service returns detected entities from the images.
    """

    SERVICE_ADDRESS = "vision.googleapis.com:443"
    """The default address of the service."""

    # The name of the interface for this client. This is the key used to
    # find the method configuration in the client_config dictionary.
    _INTERFACE_NAME = "google.cloud.vision.v1p1beta1.ImageAnnotator"

    @classmethod
    def from_service_account_file(cls, filename, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ImageAnnotatorClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        # Any `credentials` passed by the caller is intentionally replaced
        # by the ones loaded from the file.
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    # Alias kept so both spellings construct a client from a JSON key file.
    from_service_account_json = from_service_account_file

    def __init__(
        self,
        transport=None,
        channel=None,
        credentials=None,
        client_config=None,
        client_info=None,
    ):
        """Constructor.

        Args:
            transport (Union[~.ImageAnnotatorGrpcTransport,
                Callable[[~.Credentials, type], ~.ImageAnnotatorGrpcTransport]): A transport
                instance, responsible for actually making the API calls.
                The default transport uses the gRPC protocol.
                This argument may also be a callable which returns a
                transport instance. Callables will be sent the credentials
                as the first argument and the default transport class as
                the second argument.
            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
                through which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is mutually exclusive with providing a
                transport instance to ``transport``; doing so will raise
                an exception.
            client_config (dict): DEPRECATED. A dictionary of call options for
                each method. If not specified, the default configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            ValueError: If both a transport instance and credentials are given.
        """
        # Raise deprecation warnings for things we want to go away.
        if client_config is not None:
            warnings.warn(
                "The `client_config` argument is deprecated.",
                PendingDeprecationWarning,
                stacklevel=2,
            )
        else:
            client_config = image_annotator_client_config.config

        if channel:
            warnings.warn(
                "The `channel` argument is deprecated; use " "`transport` instead.",
                PendingDeprecationWarning,
                stacklevel=2,
            )

        # Instantiate the transport.
        # The transport is responsible for handling serialization and
        # deserialization and actually sending data to the service.
        if transport:
            if callable(transport):
                # Transport factory: give it the credentials and the
                # default transport class to build from.
                self.transport = transport(
                    credentials=credentials,
                    default_class=image_annotator_grpc_transport.ImageAnnotatorGrpcTransport,
                )
            else:
                if credentials:
                    raise ValueError(
                        "Received both a transport instance and "
                        "credentials; these are mutually exclusive."
                    )
                self.transport = transport
        else:
            # No transport given: build the default gRPC transport
            # (honoring the deprecated `channel` if one was passed).
            self.transport = image_annotator_grpc_transport.ImageAnnotatorGrpcTransport(
                address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
            )

        if client_info is None:
            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
                gapic_version=_GAPIC_LIBRARY_VERSION
            )
        else:
            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info

        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config["interfaces"][self._INTERFACE_NAME]
        )

        # Save a dictionary of cached API call functions.
        # These are the actual callables which invoke the proper
        # transport methods, wrapped with `wrap_method` to add retry,
        # timeout, and the like.
        self._inner_api_calls = {}

    # Service calls
    def batch_annotate_images(
        self,
        requests,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Run image detection and annotation for a batch of images.

        Example:
            >>> from google.cloud import vision_v1p1beta1
            >>>
            >>> client = vision_v1p1beta1.ImageAnnotatorClient()
            >>>
            >>> # TODO: Initialize `requests`:
            >>> requests = []
            >>>
            >>> response = client.batch_annotate_images(requests)

        Args:
            requests (list[Union[dict, ~google.cloud.vision_v1p1beta1.types.AnnotateImageRequest]]): Individual image annotation requests for this batch.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.vision_v1p1beta1.types.AnnotateImageRequest`
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.cloud.vision_v1p1beta1.types.BatchAnnotateImagesResponse` instance.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic,
        # caching the wrapped callable for subsequent calls.
        if "batch_annotate_images" not in self._inner_api_calls:
            self._inner_api_calls[
                "batch_annotate_images"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.batch_annotate_images,
                default_retry=self._method_configs["BatchAnnotateImages"].retry,
                default_timeout=self._method_configs["BatchAnnotateImages"].timeout,
                client_info=self._client_info,
            )

        request = image_annotator_pb2.BatchAnnotateImagesRequest(requests=requests)
        return self._inner_api_calls["batch_annotate_images"](
            request, retry=retry, timeout=timeout, metadata=metadata
        )
| |
# -*- coding: utf-8 -*-
"""
Flask Extension Tests
~~~~~~~~~~~~~~~~~~~~~
Tests the Flask extensions.
:copyright: (c) 2016 by Ali Afshar.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import shutil
import urllib2
import tempfile
import subprocess
import argparse
from flask import json
from setuptools.package_index import PackageIndex
from setuptools.archive_util import unpack_archive
# URL of the JSON registry of Flask extensions.
flask_svc_url = 'http://flask.pocoo.org/extensions/'

# OS X has awful paths when using mkstemp or gettempdir(). I don't
# care about security or clashes here, so pick something that is
# actually memorable.
if sys.platform == 'darwin':
    _tempdir = '/private/tmp'
else:
    _tempdir = tempfile.gettempdir()
# Working directory for all extension checkouts and the tox download cache.
tdir = _tempdir + '/flaskext-test'
# Root of the Flask source tree (this script lives one level below it).
flaskdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# virtualenv hack *cough*
# (an empty value still counts as "set", which stops Python from writing
# .pyc files into the checkouts)
os.environ['PYTHONDONTWRITEBYTECODE'] = ''
RESULT_TEMPATE = u'''\
<!doctype html>
<title>Flask-Extension Test Results</title>
<style type=text/css>
body { font-family: 'Georgia', serif; font-size: 17px; color: #000; }
a { color: #004B6B; }
a:hover { color: #6D4100; }
h1, h2, h3 { font-family: 'Garamond', 'Georgia', serif; font-weight: normal; }
h1 { font-size: 30px; margin: 15px 0 5px 0; }
h2 { font-size: 24px; margin: 15px 0 5px 0; }
h3 { font-size: 19px; margin: 15px 0 5px 0; }
textarea, code,
pre { font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono',
'Bitstream Vera Sans Mono', monospace!important; font-size: 15px;
background: #eee; }
pre { padding: 7px 15px; line-height: 1.3; }
p { line-height: 1.4; }
table { border: 1px solid black; border-collapse: collapse;
margin: 15px 0; }
td, th { border: 1px solid black; padding: 4px 10px;
text-align: left; }
th { background: #eee; font-weight: normal; }
tr.success { background: #D3F5CC; }
tr.failed { background: #F5D2CB; }
</style>
<h1>Flask-Extension Test Results</h1>
<p>
This page contains the detailed test results for the test run of
all {{ 'approved' if approved }} Flask extensions.
<h2>Summary</h2>
<table class=results>
<thead>
<tr>
<th>Extension
<th>Version
<th>Author
<th>License
<th>Outcome
{%- for iptr, _ in results[0].logs|dictsort %}
<th>{{ iptr }}
{%- endfor %}
</tr>
</thead>
<tbody>
{%- for result in results %}
{% set outcome = 'success' if result.success else 'failed' %}
<tr class={{ outcome }}>
<th>{{ result.name }}
<td>{{ result.version }}
<td>{{ result.author }}
<td>{{ result.license }}
<td>{{ outcome }}
{%- for iptr, _ in result.logs|dictsort %}
<td><a href="#{{ result.name }}-{{ iptr }}">see log</a>
{%- endfor %}
</tr>
{%- endfor %}
</tbody>
</table>
<h2>Test Logs</h2>
<p>Detailed test logs for all tests on all platforms:
{%- for result in results %}
{%- for iptr, log in result.logs|dictsort %}
<h3 id="{{ result.name }}-{{ iptr }}">
{{ result.name }} - {{ result.version }} [{{ iptr }}]</h3>
<pre>{{ log }}</pre>
{%- endfor %}
{%- endfor %}
'''
def log(msg, *args):
    # Print a tagged progress message; %-style args are applied lazily.
    # (Python 2 print statement -- this whole script is Python 2 only.)
    print '[EXTTEST]', msg % args
class TestResult(object):
    """Collects metadata and per-interpreter test logs for one extension run.

    Attributes set in the constructor: name, folder, statuscode,
    success (True when the tox exit code was 0), license, author,
    version (queried from setup.py, '?' when unavailable), and
    logs (dict mapping interpreter name to the captured test log text).
    """

    def __init__(self, name, folder, statuscode, interpreters):
        # Use the first tox environment's interpreter to query the
        # checkout's setup.py for metadata fields.
        intrptr = os.path.join(folder, '.tox/%s/bin/python'
                               % interpreters[0])
        self.statuscode = statuscode
        self.folder = folder
        self.success = statuscode == 0

        def fetch(field):
            # Run `setup.py --<field>` and return its stripped stdout;
            # '?' if the interpreter is missing (e.g. the env never built).
            try:
                c = subprocess.Popen([intrptr, 'setup.py',
                                      '--' + field], cwd=folder,
                                     stdout=subprocess.PIPE)
                return c.communicate()[0].strip()
            except OSError:
                return '?'
        self.name = name
        self.license = fetch('license')
        self.author = fetch('author')
        self.version = fetch('version')

        # Collect the per-interpreter test log written by the wrapper
        # script; empty string when the log file was never produced.
        self.logs = {}
        for interpreter in interpreters:
            logfile = os.path.join(folder, '.tox/%s/log/test.log'
                                   % interpreter)
            if os.path.isfile(logfile):
                self.logs[interpreter] = open(logfile).read()
            else:
                self.logs[interpreter] = ''
def create_tdir():
    """(Re)create the temporary working directory ``tdir``, empty.

    ``ignore_errors`` covers the first run, where ``tdir`` does not
    exist yet, replacing the previous broad try/except-pass.
    """
    shutil.rmtree(tdir, ignore_errors=True)
    os.mkdir(tdir)
def package_flask():
    """Build a Flask sdist tarball and return the path to the archive.

    The archive lands in a private folder inside ``tdir`` so that
    ``os.listdir`` below sees exactly one file.
    """
    distfolder = tdir + '/.flask-dist'
    # '--dist-dir' is the actual sdist option name; '--dist' is not
    # recognized by distutils/setuptools.
    c = subprocess.Popen(['python', 'setup.py', 'sdist', '--formats=gztar',
                          '--dist-dir', distfolder], cwd=flaskdir)
    c.wait()
    return os.path.join(distfolder, os.listdir(distfolder)[0])
def get_test_command(checkout_dir):
    """Return the shell command used to run a checkout's test suite.

    Prefers `make test` when the checkout ships a Makefile, otherwise
    falls back to the setuptools test runner.
    """
    has_makefile = os.path.isfile(checkout_dir + '/Makefile')
    return 'make test' if has_makefile else 'python setup.py test'
def fetch_extensions_list():
    """Yield extension records (dicts) from the Flask extension registry.

    Requests ``flask_svc_url`` with an ``accept: application/json``
    header and iterates the 'extensions' list of the JSON response.
    NOTE: uses ``urllib2``, so Python 2 only.
    """
    req = urllib2.Request(flask_svc_url, headers={'accept':'application/json'})
    d = urllib2.urlopen(req).read()
    data = json.loads(d)
    for ext in data['extensions']:
        yield ext
def checkout_extension(name):
    """Download and unpack extension *name* under ``tdir``.

    Returns the first directory found inside the unpacked archive (the
    source checkout). If the archive contains no directory, the last
    listed entry -- or None for an empty archive -- is returned instead.
    """
    log('Downloading extension %s to temporary folder', name)
    root = os.path.join(tdir, name)
    os.mkdir(root)
    # Let setuptools' package index resolve and fetch the release archive.
    checkout_path = PackageIndex().download(name, root)

    unpack_archive(checkout_path, root)
    path = None
    for fn in os.listdir(root):
        path = os.path.join(root, fn)
        if os.path.isdir(path):
            break
    log('Downloaded to %s', path)
    return path
tox_template = """[tox]
envlist=%(env)s
[testenv]
deps=
%(deps)s
distribute
py
commands=bash flaskext-runtest.sh {envlogdir}/test.log
downloadcache=%(cache)s
"""
def create_tox_ini(checkout_path, interpreters, flask_dep):
    """Write the generated tox config into *checkout_path* and return
    its path. An already-existing config file is left untouched."""
    tox_path = os.path.join(checkout_path, 'tox-flask-test.ini')
    if not os.path.exists(tox_path):
        contents = tox_template % {
            'env': ','.join(interpreters),
            'cache': tdir,
            'deps': flask_dep
        }
        with open(tox_path, 'w') as config_file:
            config_file.write(contents)
    return tox_path
def iter_extensions(only_approved=True):
    """Yield extension names from the registry, optionally restricted
    to approved extensions."""
    for ext in fetch_extensions_list():
        if only_approved and not ext['approved']:
            continue
        yield ext['name']
def test_extension(name, interpreters, flask_dep):
    """Check out extension *name* and run its test suite under tox.

    Returns a TestResult carrying the tox exit status and per-interpreter
    logs.
    """
    checkout_path = checkout_extension(name)
    log('Running tests with tox in %s', checkout_path)
    # figure out the test command and write a wrapper script. We
    # can't write that directly into the tox ini because tox does
    # not invoke the command from the shell so we have no chance
    # to pipe the output into a logfile. The /dev/null hack is
    # to trick py.test (if used) into not guessing widths from the
    # invoking terminal.
    test_command = get_test_command(checkout_path)
    log('Test command: %s', test_command)
    f = open(checkout_path + '/flaskext-runtest.sh', 'w')
    f.write(test_command + ' &> "$1" < /dev/null\n')
    f.close()

    # if there is a tox.ini, remove it, it will cause troubles
    # for us. Remove it if present, we are running tox ourselves
    # afterall.
    # NOTE(review): despite the comment above, nothing here deletes an
    # existing tox.ini; create_tox_ini() writes a separate
    # tox-flask-test.ini which is what the tox call below uses.
    create_tox_ini(checkout_path, interpreters, flask_dep)
    rv = subprocess.call(['tox', '-c', 'tox-flask-test.ini'], cwd=checkout_path)
    return TestResult(name, checkout_path, rv, interpreters)
def run_tests(extensions, interpreters):
    """Package Flask once, then test every extension against it.

    Returns a dict mapping extension name to its TestResult.
    """
    results = {}
    create_tdir()
    log('Packaging Flask')
    # Build the local Flask sdist once; every extension's tox env
    # installs this same archive.
    flask_dep = package_flask()
    log('Running extension tests')
    log('Temporary Environment: %s', tdir)
    for name in extensions:
        log('Testing %s', name)
        result = test_extension(name, interpreters, flask_dep)
        if result.success:
            log('Extension test succeeded')
        else:
            log('Extension test failed')
        results[name] = result
    return results
def render_results(results, approved):
    """Render the HTML report and return the temp file it was written to.

    Python 2 only: relies on dict.values() returning a list (sorted in
    place) and writes UTF-8 encoded bytes to a text-mode file handle.
    """
    from jinja2 import Template
    items = results.values()
    items.sort(key=lambda x: x.name.lower())
    rv = Template(RESULT_TEMPATE, autoescape=True).render(results=items,
                                                          approved=approved)
    fd, filename = tempfile.mkstemp(suffix='.html')
    os.fdopen(fd, 'w').write(rv.encode('utf-8') + '\n')
    return filename
def main():
parser = argparse.ArgumentParser(description='Runs Flask extension tests')
parser.add_argument('--all', dest='all', action='store_true',
help='run against all extensions, not just approved')
parser.add_argument('--browse', dest='browse', action='store_true',
help='show browser with the result summary')
parser.add_argument('--env', dest='env', default='py25,py26,py27',
help='the tox environments to run against')
parser.add_argument('--extension=', dest='extension', default=None,
help='tests a single extension')
args = parser.parse_args()
if args.extension is not None:
only_approved = False
extensions = [args.extension]
else:
only_approved = not args.all
extensions = iter_extensions(only_approved)
results = run_tests(extensions, [x.strip() for x in args.env.split(',')])
filename = render_results(results, only_approved)
if args.browse:
import webbrowser
webbrowser.open('file:///' + filename.lstrip('/'))
print 'Results written to', filename
if __name__ == '__main__':
main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InboundNatRulesOperations(object):
"""InboundNatRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to build and send HTTP requests.
    self._client = client
    # msrest serializer/deserializer pair for request and response bodies.
    self._serialize = serializer
    self._deserialize = deserializer
    # Service client configuration (provides e.g. subscription_id and
    # polling_interval, as read by the methods below).
    self._config = config
def list(
    self,
    resource_group_name,  # type: str
    load_balancer_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.InboundNatRuleListResult"]
    """Gets all the inbound nat rules in a load balancer.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param load_balancer_name: The name of the load balancer.
    :type load_balancer_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either InboundNatRuleListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_02_01.models.InboundNatRuleListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRuleListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-02-01"
    accept = "application/json, text/json"

    # Builds either the first-page request (templated URL + api-version)
    # or a follow-up request from the service-provided next_link.
    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link is already a fully formed URL; no extra parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    # Deserializes one page into (next_link, iterator of elements).
    def extract_data(pipeline_response):
        deserialized = self._deserialize('InboundNatRuleListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    # Sends one page request and validates the HTTP status.
    def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    load_balancer_name,  # type: str
    inbound_nat_rule_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    # Sends the initial DELETE request of the long-running operation;
    # the polling wrapper lives in begin_delete().
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-02-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are all acceptable for an LRO delete.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    load_balancer_name,  # type: str
    inbound_nat_rule_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified load balancer inbound nat rule.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param load_balancer_name: The name of the load balancer.
    :type load_balancer_name: str
    :param inbound_nat_rule_name: The name of the inbound nat rule.
    :type inbound_nat_rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # When resuming from a continuation token, the initial DELETE has
    # already happened; skip straight to building the poller below.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            load_balancer_name=load_balancer_name,
            inbound_nat_rule_name=inbound_nat_rule_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    load_balancer_name,  # type: str
    inbound_nat_rule_name,  # type: str
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.InboundNatRule"
    """Gets the specified load balancer inbound nat rule.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param load_balancer_name: The name of the load balancer.
    :type load_balancer_name: str
    :param inbound_nat_rule_name: The name of the inbound nat rule.
    :type inbound_nat_rule_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: InboundNatRule, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_02_01.models.InboundNatRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-02-01"
    accept = "application/json, text/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # '$expand' is only sent when the caller asked for referenced
    # resources to be expanded.
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('InboundNatRule', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name, # type: str
    load_balancer_name, # type: str
    inbound_nat_rule_name, # type: str
    inbound_nat_rule_parameters, # type: "_models.InboundNatRule"
    **kwargs # type: Any
):
    # type: (...) -> "_models.InboundNatRule"
    """Send the initial PUT request of the create-or-update long-running
    operation for a load balancer inbound NAT rule.

    This is the non-polling half of ``begin_create_or_update``: it issues a
    single PUT and deserializes the 200 (updated) or 201 (created) response.

    :param resource_group_name: The name of the resource group.
    :param load_balancer_name: The name of the load balancer.
    :param inbound_nat_rule_name: The name of the inbound nat rule.
    :param inbound_nat_rule_parameters: Parameters supplied to the create or
     update inbound nat rule operation.
    :keyword callable cls: A custom type or function that will be passed the
     direct response.
    :return: InboundNatRule, or the result of cls(response).
    :raises ~azure.core.exceptions.HttpResponseError: on any other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
    # Map well-known failure statuses to typed azure-core exceptions; callers
    # may extend or override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-02-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json, text/json"

    # Construct URL from the operation metadata template.
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the rule model into the request body and run the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(inbound_nat_rule_parameters, 'InboundNatRule')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both success codes carry the resulting rule in the response body.
    if response.status_code == 200:
        deserialized = self._deserialize('InboundNatRule', pipeline_response)
    if response.status_code == 201:
        deserialized = self._deserialize('InboundNatRule', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name, # type: str
    load_balancer_name, # type: str
    inbound_nat_rule_name, # type: str
    inbound_nat_rule_parameters, # type: "_models.InboundNatRule"
    **kwargs # type: Any
):
    # type: (...) -> LROPoller["_models.InboundNatRule"]
    """Creates or updates a load balancer inbound nat rule.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param load_balancer_name: The name of the load balancer.
    :type load_balancer_name: str
    :param inbound_nat_rule_name: The name of the inbound nat rule.
    :type inbound_nat_rule_name: str
    :param inbound_nat_rule_parameters: Parameters supplied to the create or update inbound nat
     rule operation.
    :type inbound_nat_rule_parameters: ~azure.mgmt.network.v2018_02_01.models.InboundNatRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either InboundNatRule or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.InboundNatRule]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.InboundNatRule"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            load_balancer_name=load_balancer_name,
            inbound_nat_rule_name=inbound_nat_rule_name,
            inbound_nat_rule_parameters=inbound_nat_rule_parameters,
            cls=lambda x,y,z: x,  # keep the raw PipelineResponse for the poller
            **kwargs
        )
    # These kwargs were consumed by the initial call; the polling method must
    # not receive them again.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialization step the poller runs on the terminal response.
        deserialized = self._deserialize('InboundNatRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
        'inboundNatRuleName': self._serialize.url("inbound_nat_rule_name", inbound_nat_rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling=True -> default ARM polling; polling=False -> no polling;
    # anything else is treated as a caller-supplied PollingMethod.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName}'}  # type: ignore
| |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Selection classes.
Represents an enumeration using a widget.
"""
# Prefer the ABCs from ``collections.abc``: the aliases in ``collections``
# were deprecated since Python 3.3 and removed in Python 3.10, so importing
# them directly breaks on modern interpreters. Fall back for Python 2.
try:
    from collections.abc import Mapping, Iterable
except ImportError:  # Python 2
    from collections import Mapping, Iterable
try:
    from itertools import izip
except ImportError: #python3.x
    izip = zip
from itertools import chain
from .widget_description import DescriptionWidget
from .valuewidget import ValueWidget
from .widget_core import CoreWidget
from .widget_style import Style
from .trait_types import InstanceDict
from .widget import register, widget_serialization
from traitlets import (Unicode, Bool, Int, Any, Dict, TraitError, CaselessStrEnum,
Tuple, List, Union, observe, validate)
from ipython_genutils.py3compat import unicode_type
_doc_snippets = {}
_doc_snippets['selection_params'] = """
options: list or dict
The options for the dropdown. This can either be a list of values, e.g.
``['Galileo', 'Brahe', 'Hubble']`` or ``[0, 1, 2]``, a list of
(label, value) pairs, e.g.
``[('Galileo', 0), ('Brahe', 1), ('Hubble', 2)]``,
or a dictionary mapping the labels to the values, e.g. ``{'Galileo': 0,
'Brahe': 1, 'Hubble': 2}``. The labels are the strings that will be
displayed in the UI, representing the actual Python choices, and should
be unique. If this is a dictionary, the order in which they are
displayed is not guaranteed.
index: int
The index of the current selection.
value: any
The value of the current selection. When programmatically setting the
value, a reverse lookup is performed among the options to check that
the value is valid. The reverse lookup uses the equality operator by
default, but another predicate may be provided via the ``equals``
keyword argument. For example, when dealing with numpy arrays, one may
set ``equals=np.array_equal``.
label: str
The label corresponding to the selected value.
disabled: bool
Whether to disable user changes.
description: str
Label for this input group. This should be a string
describing the widget.
"""
_doc_snippets['multiple_selection_params'] = """
options: dict or list
The options for the dropdown. This can either be a list of values, e.g.
``['Galileo', 'Brahe', 'Hubble']`` or ``[0, 1, 2]``, a list of
(label, value) pairs, e.g.
``[('Galileo', 0), ('Brahe', 1), ('Hubble', 2)]``,
or a dictionary mapping the labels to the values, e.g. ``{'Galileo': 0,
'Brahe': 1, 'Hubble': 2}``. The labels are the strings that will be
displayed in the UI, representing the actual Python choices, and should
be unique. If this is a dictionary, the order in which they are
displayed is not guaranteed.
index: iterable of int
The indices of the options that are selected.
value: iterable
The values that are selected. When programmatically setting the
value, a reverse lookup is performed among the options to check that
the value is valid. The reverse lookup uses the equality operator by
default, but another predicate may be provided via the ``equals``
keyword argument. For example, when dealing with numpy arrays, one may
set ``equals=np.array_equal``.
label: iterable of str
The labels corresponding to the selected value.
disabled: bool
Whether to disable user changes.
description: str
Label for this input group. This should be a string
describing the widget.
"""
_doc_snippets['slider_params'] = """
orientation: str
Either ``'horizontal'`` or ``'vertical'``. Defaults to ``horizontal``.
readout: bool
Display the current label next to the slider. Defaults to ``True``.
continuous_update: bool
If ``True``, update the value of the widget continuously as the user
holds the slider. Otherwise, the model is only updated after the
user has released the slider. Defaults to ``True``.
"""
def _doc_subst(cls):
    """Class decorator: expand ``{...}`` placeholders in the class docstring.

    Each entry of the module-level ``_doc_snippets`` table is stripped of
    leading/trailing whitespace (so substitution does not introduce stray
    blank lines) and then formatted into ``cls.__doc__`` in place. The class
    itself is returned, as required of a decorator.
    """
    snippets = {}
    for name, text in _doc_snippets.items():
        snippets[name] = text.strip()
    cls.__doc__ = cls.__doc__.format(**snippets)
    return cls
def _make_options(x):
    """Standardize the options tuple format.

    The returned tuple is in the format ``(('label', value), ('label', value), ...)``.
    The input can be

    * a Mapping of labels to values
    * an iterable of (label, value) pairs
    * an iterable of plain values, in which case labels are generated
    """
    # Mapping: keys become the (unicode) labels, values are kept as-is.
    if isinstance(x, Mapping):
        return tuple((unicode_type(label), value) for label, value in x.items())

    # Materialize so the input is iterated only once (it may be a generator).
    items = tuple(x)

    # Iterable of (label, value) pairs?
    if all(isinstance(item, (list, tuple)) and len(item) == 2 for item in items):
        return tuple((unicode_type(label), value) for label, value in items)

    # Plain values: derive each label from the value itself.
    return tuple((unicode_type(value), value) for value in items)
def findvalue(array, value, compare=lambda x, y: x == y):
    """Return the first element of *array* matching *value* under *compare*.

    *compare* defaults to plain equality; a custom predicate lets callers
    handle types where ``==`` is unsuitable (e.g. numpy arrays).
    Raises ValueError when no element matches.
    """
    for candidate in array:
        if compare(candidate, value):
            return candidate
    raise ValueError('%r not in array' % value)
class _Selection(DescriptionWidget, ValueWidget, CoreWidget):
    """Base class for Selection widgets

    ``options`` can be specified as a list of values, list of (label, value)
    tuples, or a dict of {label: value}. The labels are the strings that will be
    displayed in the UI, representing the actual Python choices, and should be
    unique. If labels are not specified, they are generated from the values.

    When programmatically setting the value, a reverse lookup is performed
    among the options to check that the value is valid. The reverse lookup uses
    the equality operator by default, but another predicate may be provided via
    the ``equals`` keyword argument. For example, when dealing with numpy arrays,
    one may set equals=np.array_equal.
    """

    # Only ``index`` (and ``_options_labels``) are synced with the frontend;
    # ``value`` and ``label`` are kept consistent with ``index`` by the
    # observers below.
    value = Any(None, help="Selected value", allow_none=True)
    label = Unicode(None, help="Selected label", allow_none=True)
    index = Int(None, help="Selected index", allow_none=True).tag(sync=True)

    options = Any((),
    help="""Iterable of values, (label, value) pairs, or a mapping of {label: value} pairs that the user can select.

    The labels are the strings that will be displayed in the UI, representing the
    actual Python choices, and should be unique.
    """)

    # Normalized ((label, value), ...) form of ``options``; populated by
    # __init__ and the options validator.
    _options_full = None

    # This being read-only means that it cannot be changed from the frontend!
    _options_labels = Tuple(read_only=True, help="The labels for the options.").tag(sync=True)

    disabled = Bool(help="Enable or disable user changes").tag(sync=True)

    def __init__(self, *args, **kwargs):
        # Custom equality predicate used by the value reverse lookup.
        self.equals = kwargs.pop('equals', lambda x, y: x == y)

        # We have to make the basic options bookkeeping consistent
        # so we don't have errors the first time validators run
        self._initializing_traits_ = True
        options = _make_options(kwargs.get('options', ()))
        self._options_full = options
        self.set_trait('_options_labels', tuple(i[0] for i in options))
        self._options_values = tuple(i[1] for i in options)

        # Select the first item by default, if we can
        if 'index' not in kwargs and 'value' not in kwargs and 'label' not in kwargs:
            nonempty = (len(options) > 0)
            kwargs['index'] = 0 if nonempty else None
            kwargs['label'], kwargs['value'] = options[0] if nonempty else (None, None)

        super(_Selection, self).__init__(*args, **kwargs)
        self._initializing_traits_ = False

    @validate('options')
    def _validate_options(self, proposal):
        # if an iterator is provided, exhaust it
        if isinstance(proposal.value, Iterable) and not isinstance(proposal.value, Mapping):
            proposal.value = tuple(proposal.value)
        # throws an error if there is a problem converting to full form
        self._options_full = _make_options(proposal.value)
        return proposal.value

    @observe('options')
    def _propagate_options(self, change):
        "Set the values and labels, and select the first option if we aren't initializing"
        options = self._options_full
        self.set_trait('_options_labels', tuple(i[0] for i in options))
        self._options_values = tuple(i[1] for i in options)
        if self._initializing_traits_ is not True:
            self.index = 0 if len(options) > 0 else None

    @validate('index')
    def _validate_index(self, proposal):
        # None (no selection) is always allowed; otherwise bounds-check.
        if proposal.value is None or 0 <= proposal.value < len(self._options_labels):
            return proposal.value
        else:
            raise TraitError('Invalid selection: index out of bounds')

    @observe('index')
    def _propagate_index(self, change):
        "Propagate changes in index to the value and label properties"
        label = self._options_labels[change.new] if change.new is not None else None
        value = self._options_values[change.new] if change.new is not None else None
        # Identity (`is`) comparison avoids invoking ``==``, which may be
        # ambiguous for some option values (e.g. numpy arrays).
        if self.label is not label:
            self.label = label
        if self.value is not value:
            self.value = value

    @validate('value')
    def _validate_value(self, proposal):
        value = proposal.value
        try:
            # Replace the proposed value with the equal object from options.
            return findvalue(self._options_values, value, self.equals) if value is not None else None
        except ValueError:
            raise TraitError('Invalid selection: value not found')

    @observe('value')
    def _propagate_value(self, change):
        # Keep ``index`` in sync; its own observer then updates ``label``.
        index = self._options_values.index(change.new) if change.new is not None else None
        if self.index != index:
            self.index = index

    @validate('label')
    def _validate_label(self, proposal):
        if (proposal.value is not None) and (proposal.value not in self._options_labels):
            raise TraitError('Invalid selection: label not found')
        return proposal.value

    @observe('label')
    def _propagate_label(self, change):
        # Keep ``index`` in sync; its own observer then updates ``value``.
        index = self._options_labels.index(change.new) if change.new is not None else None
        if self.index != index:
            self.index = index

    def _repr_keys(self):
        keys = super(_Selection, self)._repr_keys()
        # Include options manually, as it isn't marked as synced:
        for key in sorted(chain(keys, ('options',))):
            if key == 'index' and self.index == 0:
                # Index 0 is default when there are options
                continue
            yield key
class _MultipleSelection(DescriptionWidget, ValueWidget, CoreWidget):
    """Base class for multiple Selection widgets

    ``options`` can be specified as a list of values, list of (label, value)
    tuples, or a dict of {label: value}. The labels are the strings that will be
    displayed in the UI, representing the actual Python choices, and should be
    unique. If labels are not specified, they are generated from the values.

    When programmatically setting the value, a reverse lookup is performed
    among the options to check that the value is valid. The reverse lookup uses
    the equality operator by default, but another predicate may be provided via
    the ``equals`` keyword argument. For example, when dealing with numpy arrays,
    one may set equals=np.array_equal.
    """

    # ``index``/``value``/``label`` are parallel tuples describing the current
    # selection; only ``index`` (and ``_options_labels``) sync to the frontend.
    value = Tuple(help="Selected values")
    label = Tuple(help="Selected labels")
    index = Tuple(help="Selected indices").tag(sync=True)

    options = Any((),
    help="""Iterable of values, (label, value) pairs, or a mapping of {label: value} pairs that the user can select.

    The labels are the strings that will be displayed in the UI, representing the
    actual Python choices, and should be unique.
    """)

    # Normalized ((label, value), ...) form of ``options``; populated by
    # __init__ and the options validator.
    _options_full = None

    # This being read-only means that it cannot be changed from the frontend!
    _options_labels = Tuple(read_only=True, help="The labels for the options.").tag(sync=True)

    disabled = Bool(help="Enable or disable user changes").tag(sync=True)

    def __init__(self, *args, **kwargs):
        # Custom equality predicate used by the value reverse lookup.
        self.equals = kwargs.pop('equals', lambda x, y: x == y)

        # We have to make the basic options bookkeeping consistent
        # so we don't have errors the first time validators run
        self._initializing_traits_ = True
        options = _make_options(kwargs.get('options', ()))
        # BUGFIX: was ``self._full_options = options`` — a typo that left
        # ``_options_full`` (the attribute every reader uses, cf. _Selection)
        # unset by __init__.
        self._options_full = options
        self.set_trait('_options_labels', tuple(i[0] for i in options))
        self._options_values = tuple(i[1] for i in options)

        super(_MultipleSelection, self).__init__(*args, **kwargs)
        self._initializing_traits_ = False

    @validate('options')
    def _validate_options(self, proposal):
        # If an iterator is provided, exhaust it so it can be reused below.
        if isinstance(proposal.value, Iterable) and not isinstance(proposal.value, Mapping):
            proposal.value = tuple(proposal.value)
        # throws an error if there is a problem converting to full form
        self._options_full = _make_options(proposal.value)
        return proposal.value

    @observe('options')
    def _propagate_options(self, change):
        "Unselect any option"
        options = self._options_full
        self.set_trait('_options_labels', tuple(i[0] for i in options))
        self._options_values = tuple(i[1] for i in options)
        if self._initializing_traits_ is not True:
            self.index = ()

    @validate('index')
    def _validate_index(self, proposal):
        "Check the range of each proposed index."
        if all(0 <= i < len(self._options_labels) for i in proposal.value):
            return proposal.value
        else:
            raise TraitError('Invalid selection: index out of bounds')

    @observe('index')
    def _propagate_index(self, change):
        "Propagate changes in index to the value and label properties"
        label = tuple(self._options_labels[i] for i in change.new)
        value = tuple(self._options_values[i] for i in change.new)
        # we check equality so we can avoid validation if possible
        if self.label != label:
            self.label = label
        if self.value != value:
            self.value = value

    @validate('value')
    def _validate_value(self, proposal):
        "Replace all values with the actual objects in the options list"
        try:
            return tuple(findvalue(self._options_values, i, self.equals) for i in proposal.value)
        except ValueError:
            raise TraitError('Invalid selection: value not found')

    @observe('value')
    def _propagate_value(self, change):
        # Keep ``index`` in sync; its own observer then updates ``label``.
        index = tuple(self._options_values.index(i) for i in change.new)
        if self.index != index:
            self.index = index

    @validate('label')
    def _validate_label(self, proposal):
        if any(i not in self._options_labels for i in proposal.value):
            raise TraitError('Invalid selection: label not found')
        return proposal.value

    @observe('label')
    def _propagate_label(self, change):
        # Keep ``index`` in sync; its own observer then updates ``value``.
        index = tuple(self._options_labels.index(i) for i in change.new)
        if self.index != index:
            self.index = index

    def _repr_keys(self):
        keys = super(_MultipleSelection, self)._repr_keys()
        # Include options manually, as it isn't marked as synced:
        for key in sorted(chain(keys, ('options',))):
            yield key
@register
class ToggleButtonsStyle(Style, CoreWidget):
    """Button style widget.

    Parameters
    ----------
    button_width: str
        The width of each button. This should be a valid CSS
        width, e.g. '10px' or '5em'.
    """
    _model_name = Unicode('ToggleButtonsStyleModel').tag(sync=True)
    button_width = Unicode(help="The width of each button.").tag(sync=True)
@register
@_doc_subst
class ToggleButtons(_Selection):
    """Group of toggle buttons that represent an enumeration.

    Only one toggle button can be toggled at any point in time.

    Parameters
    ----------
    {selection_params}

    tooltips: list
        Tooltip for each button. If specified, must be the
        same length as `options`.

    icons: list
        Icons to show on the buttons. This must be the name
        of a font-awesome icon. See `http://fontawesome.io/icons/`
        for a list of icons.

    button_style: str
        One of 'primary', 'success', 'info', 'warning' or
        'danger'. Applies a predefined style to every button.

    style: ToggleButtonsStyle
        Style parameters for the buttons.
    """
    _view_name = Unicode('ToggleButtonsView').tag(sync=True)
    _model_name = Unicode('ToggleButtonsModel').tag(sync=True)

    tooltips = List(Unicode(), help="Tooltips for each button.").tag(sync=True)
    icons = List(Unicode(), help="Icons names for each button (FontAwesome names without the fa- prefix).").tag(sync=True)
    style = InstanceDict(ToggleButtonsStyle).tag(sync=True, **widget_serialization)

    # '' (the default) applies no predefined bootstrap-like styling.
    button_style = CaselessStrEnum(
        values=['primary', 'success', 'info', 'warning', 'danger', ''],
        default_value='', allow_none=True, help="""Use a predefined styling for the buttons.""").tag(sync=True)
@register
@_doc_subst
class Dropdown(_Selection):
    """Allows you to select a single item from a dropdown.

    Parameters
    ----------
    {selection_params}
    """
    _view_name = Unicode('DropdownView').tag(sync=True)
    _model_name = Unicode('DropdownModel').tag(sync=True)
@register
@_doc_subst
class RadioButtons(_Selection):
    """Group of radio buttons that represent an enumeration.

    Only one radio button can be toggled at any point in time.

    Parameters
    ----------
    {selection_params}
    """
    _view_name = Unicode('RadioButtonsView').tag(sync=True)
    _model_name = Unicode('RadioButtonsModel').tag(sync=True)
@register
@_doc_subst
class Select(_Selection):
    """
    Listbox that only allows one item to be selected at any given time.

    Parameters
    ----------
    {selection_params}

    rows: int
        The number of rows to display in the widget.
    """
    _view_name = Unicode('SelectView').tag(sync=True)
    _model_name = Unicode('SelectModel').tag(sync=True)
    rows = Int(5, help="The number of rows to display.").tag(sync=True)
@register
@_doc_subst
class SelectMultiple(_MultipleSelection):
    """
    Listbox that allows many items to be selected at any given time.

    The ``value``, ``label`` and ``index`` attributes are all iterables.

    Parameters
    ----------
    {multiple_selection_params}

    rows: int
        The number of rows to display in the widget.
    """
    _view_name = Unicode('SelectMultipleView').tag(sync=True)
    _model_name = Unicode('SelectMultipleModel').tag(sync=True)
    rows = Int(5, help="The number of rows to display.").tag(sync=True)
class _SelectionNonempty(_Selection):
    """Selection that is guaranteed to have a value selected."""
    # don't allow None to be an option (traits redeclared without allow_none).
    value = Any(help="Selected value")
    label = Unicode(help="Selected label")
    index = Int(help="Selected index").tag(sync=True)

    def __init__(self, *args, **kwargs):
        # Reject construction without options; an empty widget could never
        # satisfy the "always has a selection" invariant.
        if len(kwargs.get('options', ())) == 0:
            raise TraitError('options must be nonempty')
        super(_SelectionNonempty, self).__init__(*args, **kwargs)

    @validate('options')
    def _validate_options(self, proposal):
        # If an iterator is provided, exhaust it so it can be reused below.
        if isinstance(proposal.value, Iterable) and not isinstance(proposal.value, Mapping):
            proposal.value = tuple(proposal.value)
        self._options_full = _make_options(proposal.value)
        if len(self._options_full) == 0:
            raise TraitError("Option list must be nonempty")
        return proposal.value
class _MultipleSelectionNonempty(_MultipleSelection):
    """Selection that is guaranteed to have an option available."""

    def __init__(self, *args, **kwargs):
        # Reject construction without options.
        if len(kwargs.get('options', ())) == 0:
            raise TraitError('options must be nonempty')
        super(_MultipleSelectionNonempty, self).__init__(*args, **kwargs)

    @validate('options')
    def _validate_options(self, proposal):
        # If an iterator is provided, exhaust it so it can be reused below.
        if isinstance(proposal.value, Iterable) and not isinstance(proposal.value, Mapping):
            proposal.value = tuple(proposal.value)
        # throws an error if there is a problem converting to full form
        self._options_full = _make_options(proposal.value)
        if len(self._options_full) == 0:
            raise TraitError("Option list must be nonempty")
        return proposal.value
@register
@_doc_subst
class SelectionSlider(_SelectionNonempty):
    """
    Slider to select a single item from a list or dictionary.

    Parameters
    ----------
    {selection_params}

    {slider_params}
    """
    _view_name = Unicode('SelectionSliderView').tag(sync=True)
    _model_name = Unicode('SelectionSliderModel').tag(sync=True)

    orientation = CaselessStrEnum(
        values=['horizontal', 'vertical'], default_value='horizontal',
        allow_none=False, help="Vertical or horizontal.").tag(sync=True)
    readout = Bool(True,
        help="Display the current selected label next to the slider").tag(sync=True)
    continuous_update = Bool(True,
        help="Update the value of the widget as the user is holding the slider.").tag(sync=True)
@register
@_doc_subst
class SelectionRangeSlider(_MultipleSelectionNonempty):
    """
    Slider to select multiple contiguous items from a list.

    The index, value, and label attributes contain the start and end of
    the selection range, not all items in the range.

    Parameters
    ----------
    {multiple_selection_params}

    {slider_params}
    """
    _view_name = Unicode('SelectionRangeSliderView').tag(sync=True)
    _model_name = Unicode('SelectionRangeSliderModel').tag(sync=True)

    # Unlike the general multiple-selection base, these always hold exactly
    # two entries: the (min, max) endpoints of the selected range.
    value = Tuple(help="Min and max selected values")
    label = Tuple(help="Min and max selected labels")
    index = Tuple((0,0), help="Min and max selected indices").tag(sync=True)

    @observe('options')
    def _propagate_options(self, change):
        "Select the first range"
        options = self._options_full
        self.set_trait('_options_labels', tuple(i[0] for i in options))
        self._options_values = tuple(i[1] for i in options)
        if self._initializing_traits_ is not True:
            self.index = (0, 0)

    @validate('index')
    def _validate_index(self, proposal):
        "Make sure we have two indices and check the range of each proposed index."
        if len(proposal.value) != 2:
            raise TraitError('Invalid selection: index must have two values, but is %r'%(proposal.value,))
        if all(0 <= i < len(self._options_labels) for i in proposal.value):
            return proposal.value
        else:
            raise TraitError('Invalid selection: index out of bounds: %s'%(proposal.value,))

    orientation = CaselessStrEnum(
        values=['horizontal', 'vertical'], default_value='horizontal',
        allow_none=False, help="Vertical or horizontal.").tag(sync=True)
    readout = Bool(True,
        help="Display the current selected label next to the slider").tag(sync=True)
    continuous_update = Bool(True,
        help="Update the value of the widget as the user is holding the slider.").tag(sync=True)
| |
# -*- coding: utf-8 -*-
import subprocess
import struct
import os
import json
import uuid
import StringIO
import django
from django.test import TestCase
from django.test.utils import override_settings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.http import HttpResponseNotAllowed
from django.core import management
try:
from django.utils.timezone import now as dt_now
except ImportError:
import datetime
dt_now = datetime.datetime.now
from .models import APNService, Device, Notification, NotificationPayloadSizeExceeded
from .http import JSONResponse
from .utils import generate_cert_and_pkey
from .forms import APNServiceForm
from .settings import get_setting
# A syntactically valid 64-hex-char APNs device token used across the tests.
TOKEN = '0fd12510cfe6b0a4a89dc7369c96df956f991e66131dab63398734e8000d0029'
# Self-signed cert shipped alongside the tests, fed to the mock TLS server.
TEST_PEM = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test.pem'))
# Command line for an openssl s_server standing in for the APNs gateway on
# the default feedback/push port 2195.
SSL_SERVER_COMMAND = ('openssl', 's_server', '-accept', '2195', '-cert', TEST_PEM)
class UseMockSSLServerMixin(object):
    """TestCase mixin that runs a mock APNs TLS server for the whole class.

    Spawns an ``openssl s_server`` subprocess in setUpClass and kills it in
    tearDownClass, so tests can open real SSL connections to 127.0.0.1:2195.
    Requires the ``openssl`` binary on PATH.
    """

    @classmethod
    def setUpClass(cls):
        super(UseMockSSLServerMixin, cls).setUpClass()
        # stdout is piped so the server's chatter doesn't pollute test output.
        cls.test_server_proc = subprocess.Popen(SSL_SERVER_COMMAND, stdout=subprocess.PIPE)

    @classmethod
    def tearDownClass(cls):
        cls.test_server_proc.kill()
        super(UseMockSSLServerMixin, cls).tearDownClass()
class APNServiceTest(UseMockSSLServerMixin, TestCase):
    """Tests for APNService: payload packing, pushing, and form validation."""

    def setUp(self):
        # Fresh throwaway cert/key pair per test; the service points at the
        # mock TLS server started by the mixin (127.0.0.1:2195).
        cert, key = generate_cert_and_pkey()
        self.service = APNService.objects.create(name='test-service', hostname='127.0.0.1',
                                                 certificate=cert, private_key=key)
        self.device = Device.objects.create(token=TOKEN, service=self.service)
        self.notification = Notification.objects.create(message='Test message', service=self.service)

    def test_invalid_payload_size(self):
        # 250 chars pushes the JSON payload over the APNs size limit.
        n = Notification(message='.' * 250)
        self.assertRaises(NotificationPayloadSizeExceeded, self.service.pack_message, n.payload, self.device)

    def test_payload_packed_correctly(self):
        # Round-trip: pack, then unpack with the same struct format; the last
        # unpacked field should be the original payload string.
        fmt = self.service.fmt
        payload = self.notification.payload
        msg = self.service.pack_message(payload, self.device)
        unpacked = struct.unpack(fmt % len(payload), msg)
        self.assertEqual(unpacked[-1], payload)

    def test_pack_message_with_invalid_device(self):
        # Passing None instead of a Device should raise TypeError.
        self.assertRaises(TypeError, self.service.pack_message, None)

    def test_can_connect_and_push_notification(self):
        self.assertIsNone(self.notification.last_sent_at)
        self.assertIsNone(self.device.last_notified_at)
        self.service.push_notification_to_devices(self.notification, [self.device])
        self.assertIsNotNone(self.notification.last_sent_at)
        self.device = Device.objects.get(pk=self.device.pk)  # Refresh the object with values from db
        self.assertIsNotNone(self.device.last_notified_at)

    def test_create_with_passphrase(self):
        cert, key = generate_cert_and_pkey(as_string=True, passphrase='pass')
        form = APNServiceForm({'name': 'test', 'hostname': 'localhost', 'certificate': cert, 'private_key': key, 'passphrase': 'pass'})
        self.assertTrue(form.is_valid())

    def test_create_with_invalid_passphrase(self):
        cert, key = generate_cert_and_pkey(as_string=True, passphrase='correct')
        form = APNServiceForm({'name': 'test', 'hostname': 'localhost', 'certificate': cert, 'private_key': key, 'passphrase': 'incorrect'})
        self.assertFalse(form.is_valid())
        self.assertTrue('passphrase' in form.errors)

    def test_pushing_notification_in_chunks(self):
        # NOTE: xrange and uuid.get_hex() are Python 2-only APIs, consistent
        # with the module's `import StringIO`; this file targets Python 2.
        devices = []
        for i in xrange(10):
            # uuid1 hex is 32 chars; doubled to get a 64-char token.
            token = uuid.uuid1().get_hex() * 2
            device = Device.objects.create(token=token, service=self.service)
            devices.append(device)
        started_at = dt_now()
        self.service.push_notification_to_devices(self.notification, devices, chunk_size=2)
        device_count = len(devices)
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(device_count,
                          Device.objects.filter(last_notified_at__gte=started_at).count())
@override_settings(IOS_NOTIFICATIONS_AUTHENTICATION='AuthNone')
class APITest(UseMockSSLServerMixin, TestCase):
    """HTTP API tests (device registration/update/detail) with auth disabled."""
    urls = 'ios_notifications.urls'

    def setUp(self):
        cert, key = generate_cert_and_pkey()
        self.service = APNService.objects.create(name='test-service', hostname='127.0.0.1',
                                                 certificate=cert, private_key=key)
        self.device_token = TOKEN
        self.user = User.objects.create(username='testuser', email='test@example.com')
        # Pre-created device uses a token different from TOKEN so that the
        # registration test can create a new device with TOKEN.
        self.device = Device.objects.create(service=self.service, token='0fd12510cfe6b0a4a89dc7369d96df956f991e66131dab63398734e8000d0029')

    def test_register_device_invalid_params(self):
        """
        Test that sending a POST request to the device API
        without POST parameters `token` and `service` results
        in a 400 bad request response.
        """
        resp = self.client.post(reverse('ios-notifications-device-create'))
        self.assertEqual(resp.status_code, 400)
        self.assertTrue(isinstance(resp, JSONResponse))
        # The error body lists each missing parameter.
        content = json.loads(resp.content)
        keys = content.keys()
        self.assertTrue('token' in keys and 'service' in keys)

    def test_register_device(self):
        """
        Test a device is created when calling the API with the correct
        POST parameters.
        """
        resp = self.client.post(reverse('ios-notifications-device-create'),
                                {'token': self.device_token,
                                 'service': self.service.id})
        self.assertEqual(resp.status_code, 201)
        self.assertTrue(isinstance(resp, JSONResponse))
        content = resp.content
        device_json = json.loads(content)
        self.assertEqual(device_json.get('model'), 'ios_notifications.device')

    def test_disallowed_method(self):
        # DELETE is not supported on the create endpoint.
        resp = self.client.delete(reverse('ios-notifications-device-create'))
        self.assertEqual(resp.status_code, 405)
        self.assertTrue(isinstance(resp, HttpResponseNotAllowed))

    def test_update_device(self):
        kwargs = {'token': self.device.token, 'service__id': self.device.service.id}
        url = reverse('ios-notifications-device', kwargs=kwargs)
        # NOTE(review): content type is missing the trailing 'd'
        # ('application/x-www-form-urlencoded') — the view apparently still
        # parses the body; confirm whether this typo is intentional.
        resp = self.client.put(url, 'users=%d&platform=iPhone' % self.user.id,
                               content_type='application/x-www-form-urlencode')
        self.assertEqual(resp.status_code, 200)
        self.assertTrue(isinstance(resp, JSONResponse))
        device_json = json.loads(resp.content)
        self.assertEqual(device_json.get('pk'), self.device.id)
        self.assertTrue(self.user in self.device.users.all())

    def test_get_device_details(self):
        kwargs = {'token': self.device.token, 'service__id': self.device.service.id}
        url = reverse('ios-notifications-device', kwargs=kwargs)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        content = resp.content
        device_json = json.loads(content)
        self.assertEqual(device_json.get('model'), 'ios_notifications.device')
class AuthenticationDecoratorTestAuthBasic(UseMockSSLServerMixin, TestCase):
    """Tests for the API authentication decorator's HTTP Basic modes."""
    urls = 'ios_notifications.urls'

    def setUp(self):
        cert, key = generate_cert_and_pkey()
        self.service = APNService.objects.create(name='test-service', hostname='127.0.0.1',
                                                 certificate=cert, private_key=key)
        self.device_token = TOKEN
        self.user_password = 'abc123'
        self.user = User.objects.create(username='testuser', email='test@example.com')
        self.user.set_password(self.user_password)
        self.user.is_staff = True
        self.user.save()
        self.device = Device.objects.create(service=self.service, token='0fd12510cfe6b0a4a89dc7369d96df956f991e66131dab63398734e8000d0029')

    def _basic_auth_header(self, password):
        """Return an HTTP Basic Authorization header value for self.user."""
        user_pass = '%s:%s' % (self.user.username, password)
        return 'Basic %s' % user_pass.encode('base64')

    def _device_url(self):
        """Return the detail URL for self.device."""
        kwargs = {'token': self.device.token, 'service__id': self.device.service.id}
        return reverse('ios-notifications-device', kwargs=kwargs)

    @override_settings(IOS_NOTIFICATIONS_AUTHENTICATION='AuthBasic')
    def test_basic_authorization_request(self):
        """Valid credentials grant access under AuthBasic."""
        resp = self.client.get(self._device_url(), {},
                               HTTP_AUTHORIZATION=self._basic_auth_header(self.user_password))
        self.assertEqual(resp.status_code, 200)

    @override_settings(IOS_NOTIFICATIONS_AUTHENTICATION='AuthBasic')
    def test_basic_authorization_request_invalid_credentials(self):
        """A wrong password is rejected with 401."""
        auth_header = self._basic_auth_header('invalidpassword')
        url = reverse('ios-notifications-device-create')
        resp = self.client.get(url, HTTP_AUTHORIZATION=auth_header)
        self.assertEqual(resp.status_code, 401)
        self.assertTrue('authentication error' in resp.content)

    @override_settings(IOS_NOTIFICATIONS_AUTHENTICATION='AuthBasic')
    def test_basic_authorization_missing_header(self):
        """A request with no Authorization header is rejected with 401."""
        url = reverse('ios-notifications-device-create')
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 401)
        self.assertTrue('Authorization header not set' in resp.content)

    @override_settings(IOS_NOTIFICATIONS_AUTHENTICATION='AuthDoesNotExist')
    def test_invalid_authentication_type(self):
        """An unknown authentication mode raises InvalidAuthenticationType."""
        from ios_notifications.decorators import InvalidAuthenticationType
        url = reverse('ios-notifications-device-create')
        self.assertRaises(InvalidAuthenticationType, self.client.get, url)

    @override_settings(IOS_NOTIFICATIONS_AUTHENTICATION=None)
    def test_no_authentication_type(self):
        """A missing authentication mode raises InvalidAuthenticationType."""
        from ios_notifications.decorators import InvalidAuthenticationType
        url = reverse('ios-notifications-device-create')
        self.assertRaises(InvalidAuthenticationType, self.client.get, url)

    @override_settings(IOS_NOTIFICATIONS_AUTHENTICATION='AuthBasicIsStaff')
    def test_basic_authorization_is_staff(self):
        """A staff user passes the AuthBasicIsStaff check."""
        # setUp already persisted is_staff=True; the former unsaved
        # in-memory reassignment here was a no-op and has been removed.
        resp = self.client.get(self._device_url(),
                               HTTP_AUTHORIZATION=self._basic_auth_header(self.user_password))
        self.assertEqual(resp.status_code, 200)

    @override_settings(IOS_NOTIFICATIONS_AUTHENTICATION='AuthBasicIsStaff')
    def test_basic_authorization_is_staff_with_non_staff_user(self):
        """A non-staff user is rejected by the AuthBasicIsStaff check."""
        self.user.is_staff = False
        self.user.save()
        resp = self.client.get(self._device_url(),
                               HTTP_AUTHORIZATION=self._basic_auth_header(self.user_password))
        self.assertEqual(resp.status_code, 401)
        self.assertTrue('authentication error' in resp.content)
class NotificationTest(UseMockSSLServerMixin, TestCase):
    """Unit tests for Notification payload construction and pushing."""

    def setUp(self):
        cert, key = generate_cert_and_pkey()
        self.service = APNService.objects.create(name='service', hostname='127.0.0.1',
                                                 private_key=key, certificate=cert)
        # For ease of use simply change port to default port in test_server.
        self.service.PORT = 2195
        self.custom_payload = json.dumps({"." * 10: "." * 50})
        self.notification = Notification.objects.create(service=self.service,
                                                        message='Test message',
                                                        custom_payload=self.custom_payload)

    def test_valid_length(self):
        """A short message fits within the APNs payload limit."""
        self.notification.message = 'test message'
        self.assertTrue(self.notification.is_valid_length())

    def test_invalid_length(self):
        """An oversized message is rejected."""
        self.notification.message = '.' * 250
        self.assertFalse(self.notification.is_valid_length())

    def test_invalid_length_with_custom_payload(self):
        """Message and custom payload may jointly exceed the limit."""
        self.notification.message = '.' * 100
        self.notification.custom_payload = '{"%s":"%s"}' % ("." * 20, "." * 120)
        self.assertFalse(self.notification.is_valid_length())

    def test_extra_property_with_custom_payload(self):
        """Setting `extra` serializes to custom_payload and round-trips."""
        payload = {"." * 10: "." * 50, "nested": {"+" * 10: "+" * 50}}
        self.notification.extra = payload
        self.assertEqual(json.dumps(payload), self.notification.custom_payload)
        self.assertEqual(payload, self.notification.extra)
        self.assertTrue(self.notification.is_valid_length())

    def test_loc_data_payload(self):
        """Localization data replaces the alert text in the payload."""
        self.notification.set_loc_data('TEST_1', [1, 'ab', 1.2, 'CD'])
        self.notification.message = 'test message'
        expected = {'loc-key': 'TEST_1', 'loc-args': ['1', 'ab', '1.2', 'CD']}
        self.assertEqual(expected, self.notification.loc_data)
        self.assertTrue(self.notification.is_valid_length())
        alert = json.loads(self.notification.payload)['aps']['alert']
        self.assertEqual(expected, alert)

    def test_extra_property_not_dict(self):
        """Assigning a non-dict to `extra` raises TypeError."""
        with self.assertRaises(TypeError):
            self.notification.extra = 111

    def test_extra_property_none(self):
        """Clearing `extra` empties the custom payload."""
        self.notification.extra = None
        self.assertIsNone(self.notification.extra)
        self.assertEqual('', self.notification.custom_payload)
        self.assertTrue(self.notification.is_valid_length())

    def test_push_to_all_devices_persist_existing(self):
        """Pushing an already-saved notification records last_sent_at."""
        self.assertIsNone(self.notification.last_sent_at)
        self.notification.persist = False
        self.notification.push_to_all_devices()
        self.assertIsNotNone(self.notification.last_sent_at)

    def test_push_to_all_devices_persist_new(self):
        """Pushing with persist=True saves the new notification."""
        note = Notification(service=self.service, message='Test message (new)')
        note.persist = True
        note.push_to_all_devices()
        self.assertIsNotNone(note.last_sent_at)
        self.assertIsNotNone(note.pk)

    def test_push_to_all_devices_no_persist(self):
        """Pushing with persist=False leaves the notification unsaved."""
        note = Notification(service=self.service, message='Test message (new)')
        note.persist = False
        note.push_to_all_devices()
        self.assertIsNone(note.last_sent_at)
        self.assertIsNone(note.pk)
class ManagementCommandPushNotificationTest(UseMockSSLServerMixin, TestCase):
    """Tests for the `push_ios_notification` management command."""

    def setUp(self):
        self.started_at = dt_now()
        cert, key = generate_cert_and_pkey()
        self.service = APNService.objects.create(name='service', hostname='127.0.0.1',
                                                 private_key=key, certificate=cert)
        self.service.PORT = 2195
        self.device = Device.objects.create(token=TOKEN, service=self.service)

    def _push(self, msg, **extra):
        """Invoke the command for self.service with message `msg`.

        Keyword args (e.g. persist=True) are forwarded to call_command.
        The unidiomatic **{...} literal unpacking was replaced with
        direct keyword arguments.
        """
        management.call_command('push_ios_notification', message=msg,
                                service=self.service.id, verbosity=0, **extra)

    def _assert_pushed(self, msg, persisted):
        """Assert the device was notified and the notification was
        persisted (or not) since self.started_at."""
        self.assertEqual(persisted,
                         Notification.objects.filter(
                             message=msg,
                             last_sent_at__gt=self.started_at).exists())
        self.assertTrue(self.device in Device.objects.filter(last_notified_at__gt=self.started_at))

    def test_call_push_ios_notification_command_explicit_persist(self):
        msg = 'some message'
        self._push(msg, persist=True)
        self._assert_pushed(msg, True)

    def test_call_push_ios_notification_command_explicit_no_persist(self):
        msg = 'some message'
        self._push(msg, persist=False)
        self._assert_pushed(msg, False)

    @override_settings(IOS_NOTIFICATIONS_PERSIST_NOTIFICATIONS=True)
    def test_call_push_ios_notification_command_default_persist(self):
        msg = 'some message'
        self._push(msg)
        self._assert_pushed(msg, True)

    @override_settings(IOS_NOTIFICATIONS_PERSIST_NOTIFICATIONS=False)
    def test_call_push_ios_notification_command_default_no_persist(self):
        msg = 'some message'
        self._push(msg)
        self._assert_pushed(msg, False)

    @override_settings()
    def test_call_push_ios_notification_command_default_persist_not_specified(self):
        try:
            # making sure that IOS_NOTIFICATIONS_PERSIST_NOTIFICATIONS is not
            # specified in app settings, otherwise this test means nothing
            del settings.IOS_NOTIFICATIONS_PERSIST_NOTIFICATIONS
        except AttributeError:
            pass
        msg = 'some message'
        self._push(msg)
        # The app-level default is to persist notifications.
        self._assert_pushed(msg, True)

    def test_either_message_or_extra_option_required(self):
        # In Django < 1.5 django.core.management.base.BaseCommand.execute
        # catches CommandError and raises SystemExit instead.
        exception = SystemExit if django.VERSION < (1, 5) else management.base.CommandError
        with self.assertRaises(exception):
            management.call_command('push_ios_notification', service=self.service.pk,
                                    verbosity=0, stderr=StringIO.StringIO())
class ManagementCommandCallFeedbackService(TestCase):
    """Placeholder for tests of the call_feedback_service management command.

    No tests implemented yet.
    """
    pass
class DefaultSettings(TestCase):
    """Verifies the fallback values returned by get_setting()."""

    def test_persist_notifications_setting(self):
        """Notifications are persisted by default."""
        self.assertEqual(get_setting('IOS_NOTIFICATIONS_PERSIST_NOTIFICATIONS'), True)

    def test_authentication_setting(self):
        """API authentication is disabled by default."""
        self.assertEqual(get_setting('IOS_NOTIFICATIONS_AUTHENTICATION'), None)

    def test_auth_user_model(self):
        """The default user model is Django's auth.User."""
        self.assertEqual(get_setting('AUTH_USER_MODEL'), 'auth.User')

    def test_invalid_setting(self):
        """An unknown setting name raises KeyError."""
        missing_key = '_THIS_SETTING_SHOULD_NOT_EXIST__________'
        with self.assertRaises(KeyError):
            get_setting(missing_key)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.