gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Methods related to test expectations/expectation files."""
from __future__ import print_function
import collections
import datetime
import logging
import os
import re
import subprocess
import sys
import six
from typ import expectations_parser
from unexpected_passes_common import data_types
from unexpected_passes_common import result_output
# Magic comments that tell the unexpected pass finder to leave matching
# expectations alone (disable) or to resume normal handling (enable).
FINDER_DISABLE_COMMENT_BASE = 'finder:disable'
FINDER_ENABLE_COMMENT_BASE = 'finder:enable'
# Suffixes that scope a disable/enable comment to a particular removal type.
FINDER_COMMENT_SUFFIX_GENERAL = '-general'
FINDER_COMMENT_SUFFIX_STALE = '-stale'
FINDER_COMMENT_SUFFIX_UNUSED = '-unused'
ALL_FINDER_SUFFIXES = frozenset([
    FINDER_COMMENT_SUFFIX_GENERAL,
    FINDER_COMMENT_SUFFIX_STALE,
    FINDER_COMMENT_SUFFIX_UNUSED,
])
FINDER_DISABLE_COMMENT_GENERAL = (FINDER_DISABLE_COMMENT_BASE +
                                  FINDER_COMMENT_SUFFIX_GENERAL)
FINDER_DISABLE_COMMENT_STALE = (FINDER_DISABLE_COMMENT_BASE +
                                FINDER_COMMENT_SUFFIX_STALE)
FINDER_DISABLE_COMMENT_UNUSED = (FINDER_DISABLE_COMMENT_BASE +
                                 FINDER_COMMENT_SUFFIX_UNUSED)
FINDER_ENABLE_COMMENT_GENERAL = (FINDER_ENABLE_COMMENT_BASE +
                                 FINDER_COMMENT_SUFFIX_GENERAL)
FINDER_ENABLE_COMMENT_STALE = (FINDER_ENABLE_COMMENT_BASE +
                               FINDER_COMMENT_SUFFIX_STALE)
FINDER_ENABLE_COMMENT_UNUSED = (FINDER_ENABLE_COMMENT_BASE +
                                FINDER_COMMENT_SUFFIX_UNUSED)
FINDER_DISABLE_COMMENTS = frozenset([
    FINDER_DISABLE_COMMENT_GENERAL, FINDER_DISABLE_COMMENT_STALE,
    FINDER_DISABLE_COMMENT_UNUSED
])
FINDER_ENABLE_COMMENTS = frozenset([
    FINDER_ENABLE_COMMENT_GENERAL,
    FINDER_ENABLE_COMMENT_STALE,
    FINDER_ENABLE_COMMENT_UNUSED,
])
ALL_FINDER_COMMENTS = frozenset(FINDER_DISABLE_COMMENTS
                                | FINDER_ENABLE_COMMENTS)
# Matches `git blame -c` output: "revision (author date time tz lineno)content".
# The <date> group captures the YYYY-MM-DD commit date, <content> the line.
GIT_BLAME_REGEX = re.compile(
    r'^[\w\s]+\(.+(?P<date>\d\d\d\d-\d\d-\d\d)[^\)]+\)(?P<content>.*)$',
    re.DOTALL)
# Heuristic for an expectation line: a tag group and a result group, e.g.
# `crbug.com/1234 [ win ] foo/test [ Failure ]`.
EXPECTATION_LINE_REGEX = re.compile(r'^.*\[ .* \] .* \[ \w* \].*$', re.DOTALL)
# pylint: disable=useless-object-inheritance
class RemovalType(object):
  """Enum-like holder naming the reasons an expectation can be removed.

  Values intentionally reuse the finder comment suffixes so a removal type
  can be appended directly to FINDER_DISABLE_COMMENT_BASE.
  """
  STALE = FINDER_COMMENT_SUFFIX_STALE
  UNUSED = FINDER_COMMENT_SUFFIX_UNUSED
class Expectations(object):
  """Parses, removes, and interactively modifies test expectations.

  Subclasses must implement _GetExpectationFileTagHeader() and
  GetExpectationFilepaths().
  """

  def CreateTestExpectationMap(self, expectation_files, tests, grace_period):
    """Creates an expectation map based off a file or list of tests.

    Args:
      expectation_files: A filepath or list of filepaths to expectation files
          to read from, or None. If a filepath is specified, |tests| must be
          None.
      tests: An iterable of strings containing test names to check. If
          specified, |expectation_files| must be None.
      grace_period: An int specifying how many days old an expectation must
          be in order to be parsed, i.e. how many days old an expectation must
          be before it is a candidate for removal/modification.

    Returns:
      A data_types.TestExpectationMap, although all its BuilderStepMap
      contents will be empty.
    """

    def AddContentToMap(content, ex_map, expectation_file_name):
      # Parses |content| and registers each non-Skip, non-Pass-only
      # expectation under |expectation_file_name| in |ex_map|.
      list_parser = expectations_parser.TaggedTestListParser(content)
      expectations_for_file = ex_map.setdefault(
          expectation_file_name, data_types.ExpectationBuilderMap())
      logging.debug('Parsed %d expectations', len(list_parser.expectations))
      for e in list_parser.expectations:
        if 'Skip' in e.raw_results:
          continue
        # Expectations that only have a Pass expectation (usually used to
        # override a broader, failing expectation) are not handled by the
        # unexpected pass finder, so ignore those.
        if e.raw_results == ['Pass']:
          continue
        expectation = data_types.Expectation(e.test, e.tags, e.raw_results,
                                             e.reason)
        assert expectation not in expectations_for_file
        expectations_for_file[expectation] = data_types.BuilderStepMap()

    logging.info('Creating test expectation map')
    # Exactly one of |expectation_files| / |tests| must be provided.
    assert expectation_files or tests
    assert not (expectation_files and tests)

    expectation_map = data_types.TestExpectationMap()

    if expectation_files:
      if not isinstance(expectation_files, list):
        expectation_files = [expectation_files]
      for ef in expectation_files:
        expectation_file_name = os.path.normpath(ef)
        content = self._GetNonRecentExpectationContent(expectation_file_name,
                                                       grace_period)
        AddContentToMap(content, expectation_map, expectation_file_name)
    else:
      # No real file: synthesize RetryOnFailure expectations for the given
      # test names so they flow through the same parsing path.
      expectation_file_name = ''
      content = '# results: [ RetryOnFailure ]\n'
      for t in tests:
        content += '%s [ RetryOnFailure ]\n' % t
      AddContentToMap(content, expectation_map, expectation_file_name)

    return expectation_map

  def _GetNonRecentExpectationContent(self, expectation_file_path, num_days):
    """Gets content from |expectation_file_path| older than |num_days| days.

    Args:
      expectation_file_path: A string containing a filepath pointing to an
          expectation file.
      num_days: An int containing how old an expectation in the given
          expectation file must be to be included.

    Returns:
      The contents of the expectation file located at |expectation_file_path|
      as a string with any recent expectations removed.
    """
    num_days = datetime.timedelta(days=num_days)
    content = ''
    # `git blame` output is normally in the format:
    # revision optional_filename (author date time timezone lineno) line_content
    # The --porcelain option is meant to be more machine readable, but is much
    # more difficult to parse for what we need to do here. In order to
    # guarantee that the filename won't be included in the output (by default,
    # it will be shown if there is content from a renamed file), pass -c to
    # use the same format as `git annotate`, which is:
    # revision (author date time timezone lineno)line_content
    # (Note the lack of space between the ) and the content).
    cmd = ['git', 'blame', '-c', expectation_file_path]
    with open(os.devnull, 'w') as devnull:
      blame_output = subprocess.check_output(cmd,
                                             stderr=devnull).decode('utf-8')
    for line in blame_output.splitlines(True):
      match = GIT_BLAME_REGEX.match(line)
      assert match
      date = match.groupdict()['date']
      line_content = match.groupdict()['content']
      # Only expectation lines are subject to the grace period; comments and
      # blank lines are always kept.
      if EXPECTATION_LINE_REGEX.match(line):
        if six.PY2:
          # datetime.date.fromisoformat() does not exist in Python 2, so
          # parse the YYYY-MM-DD string by hand.
          date_parts = date.split('-')
          date = datetime.date(year=int(date_parts[0]),
                               month=int(date_parts[1]),
                               day=int(date_parts[2]))
        else:
          date = datetime.date.fromisoformat(date)
        date_diff = datetime.date.today() - date
        if date_diff > num_days:
          content += line_content
        else:
          logging.debug('Omitting expectation %s because it is too new',
                        line_content.rstrip())
      else:
        content += line_content
    return content

  def RemoveExpectationsFromFile(self, expectations, expectation_file,
                                 removal_type):
    """Removes lines corresponding to |expectations| from |expectation_file|.

    Ignores any lines that match but are within a disable block or have an
    inline disable comment.

    Args:
      expectations: A list of data_types.Expectations to remove.
      expectation_file: A filepath pointing to an expectation file to remove
          lines from.
      removal_type: A RemovalType enum corresponding to the type of
          expectations being removed.

    Returns:
      A set of strings containing URLs of bugs associated with the removed
      expectations.
    """
    with open(expectation_file) as f:
      input_contents = f.read()

    output_contents = ''
    in_disable_block = False
    disable_block_reason = ''
    disable_block_suffix = ''
    removed_urls = set()
    for line in input_contents.splitlines(True):
      # Auto-add any comments or empty lines
      stripped_line = line.strip()
      if _IsCommentOrBlankLine(stripped_line):
        output_contents += line
        # Only allow one enable/disable per line.
        assert len([c for c in ALL_FINDER_COMMENTS if c in line]) <= 1
        # Handle disable/enable block comments.
        if _LineContainsDisableComment(line):
          if in_disable_block:
            raise RuntimeError(
                'Invalid expectation file %s - contains a disable comment "%s" '
                'that is in another disable block.' %
                (expectation_file, stripped_line))
          in_disable_block = True
          disable_block_reason = _GetDisableReasonFromComment(line)
          disable_block_suffix = _GetFinderCommentSuffix(line)
        if _LineContainsEnableComment(line):
          if not in_disable_block:
            raise RuntimeError(
                'Invalid expectation file %s - contains an enable comment "%s" '
                'that is outside of a disable block.' %
                (expectation_file, stripped_line))
          in_disable_block = False
        continue

      current_expectation = self._CreateExpectationFromExpectationFileLine(
          line, expectation_file)

      # Add any lines containing expectations that don't match any of the
      # given expectations to remove.
      if any(e for e in expectations if e == current_expectation):
        # Skip any expectations that match if we're in a disable block or
        # there is an inline disable comment.
        if in_disable_block and _DisableSuffixIsRelevant(
            disable_block_suffix, removal_type):
          output_contents += line
          logging.info(
              'Would have removed expectation %s, but inside a disable block '
              'with reason %s', stripped_line, disable_block_reason)
        elif _LineContainsRelevantDisableComment(line, removal_type):
          output_contents += line
          logging.info(
              'Would have removed expectation %s, but it has an inline disable '
              'comment with reason %s',
              stripped_line.split('#')[0], _GetDisableReasonFromComment(line))
        else:
          bug = current_expectation.bug
          if bug:
            # It's possible to have multiple whitespace-separated bugs per
            # expectation, so treat each one separately.
            removed_urls |= set(bug.split())
      else:
        output_contents += line

    with open(expectation_file, 'w') as f:
      f.write(output_contents)
    return removed_urls

  def _CreateExpectationFromExpectationFileLine(self, line, expectation_file):
    """Creates a data_types.Expectation from |line|.

    Args:
      line: A string containing a single line from an expectation file.
      expectation_file: A filepath pointing to an expectation file |line|
          came from.

    Returns:
      A data_types.Expectation containing the same information as |line|.
    """
    # Prepend the file's tag header so the single line parses with the same
    # tag/result definitions as the real file.
    header = self._GetExpectationFileTagHeader(expectation_file)
    single_line_content = header + line
    list_parser = expectations_parser.TaggedTestListParser(single_line_content)
    assert len(list_parser.expectations) == 1
    typ_expectation = list_parser.expectations[0]
    return data_types.Expectation(typ_expectation.test, typ_expectation.tags,
                                  typ_expectation.raw_results,
                                  typ_expectation.reason)

  def _GetExpectationFileTagHeader(self, expectation_file):
    """Gets the tag header used for expectation files.

    Args:
      expectation_file: A filepath pointing to an expectation file to get the
          tag header from.

    Returns:
      A string containing an expectation file header, i.e. the comment block
      at the top of the file defining possible tags and expected results.
    """
    raise NotImplementedError()

  def ModifySemiStaleExpectations(self, stale_expectation_map):
    """Modifies lines from |stale_expectation_map| in |expectation_file|.

    Prompts the user for each modification and provides debug information
    since semi-stale expectations cannot be blindly removed like fully stale
    ones.

    Args:
      stale_expectation_map: A data_types.TestExpectationMap containing stale
          expectations.

    Returns:
      A set of strings containing URLs of bugs associated with the modified
      (manually modified by the user or removed by the script) expectations.
    """
    expectations_to_remove = []
    expectations_to_modify = []
    modified_urls = set()
    for expectation_file, e, builder_map in (
        stale_expectation_map.IterBuilderStepMaps()):
      with open(expectation_file) as infile:
        file_contents = infile.read()
      line, line_number = self._GetExpectationLine(e, file_contents,
                                                  expectation_file)
      expectation_str = None
      if not line:
        logging.error(
            'Could not find line corresponding to semi-stale expectation for '
            '%s with tags %s and expected results %s', e.test, e.tags,
            e.expected_results)
        expectation_str = '[ %s ] %s [ %s ]' % (' '.join(
            e.tags), e.test, ' '.join(e.expected_results))
      else:
        expectation_str = '%s (approx. line %d)' % (line, line_number)

      str_dict = result_output.ConvertBuilderMapToPassOrderedStringDict(
          builder_map)
      print('\nSemi-stale expectation:\n%s' % expectation_str)
      result_output.RecursivePrintToFile(str_dict, 1, sys.stdout)

      response = _WaitForUserInputOnModification()
      if response == 'r':
        expectations_to_remove.append(e)
      elif response == 'm':
        expectations_to_modify.append(e)

      # It's possible that the user will introduce a typo while manually
      # modifying an expectation, which will cause a parser error. Catch that
      # now and give them chances to fix it so that they don't lose all of
      # their work due to an early exit.
      while True:
        try:
          with open(expectation_file) as infile:
            file_contents = infile.read()
          _ = expectations_parser.TaggedTestListParser(file_contents)
          break
        except expectations_parser.ParseError as error:
          logging.error('Got parser error: %s', error)
          logging.error(
              'This probably means you introduced a typo, please fix it.')
          _WaitForAnyUserInput()

    # NOTE(review): |expectation_file| here is whatever file the loop visited
    # last, so removal appears to assume all removed expectations live in a
    # single file — confirm against callers before relying on multi-file maps.
    modified_urls |= self.RemoveExpectationsFromFile(expectations_to_remove,
                                                     expectation_file,
                                                     RemovalType.STALE)
    for e in expectations_to_modify:
      modified_urls |= set(e.bug.split())
    return modified_urls

  def _GetExpectationLine(self, expectation, file_contents, expectation_file):
    """Gets the line and line number of |expectation| in |file_contents|.

    Args:
      expectation: A data_types.Expectation.
      file_contents: A string containing the contents read from an expectation
          file.
      expectation_file: A string containing the path to the expectation file
          that |file_contents| came from.

    Returns:
      A tuple (line, line_number). |line| is a string containing the exact
      line in |file_contents| corresponding to |expectation|. |line_number| is
      an int corresponding to where |line| is in |file_contents|.
      |line_number| may be off if the file on disk has changed since
      |file_contents| was read. If a corresponding line cannot be found, both
      |line| and |line_number| are None.
    """
    # We have all the information necessary to recreate the expectation line
    # and line number can be pulled during the initial expectation parsing.
    # However, the information we have is not necessarily in the same order as
    # the text file (e.g. tag ordering), and line numbers can change pretty
    # dramatically between the initial parse and now due to stale expectations
    # being removed. So, parse this way in order to improve the user
    # experience.
    file_lines = file_contents.splitlines()
    for line_number, line in enumerate(file_lines):
      if _IsCommentOrBlankLine(line.strip()):
        continue
      current_expectation = self._CreateExpectationFromExpectationFileLine(
          line, expectation_file)
      if expectation == current_expectation:
        # 1-based line numbers to match editor conventions.
        return line, line_number + 1
    return None, None

  def FindOrphanedBugs(self, affected_urls):
    """Finds cases where expectations for bugs no longer exist.

    Args:
      affected_urls: An iterable of affected bug URLs, as returned by
          functions such as RemoveExpectationsFromFile.

    Returns:
      A set containing a subset of |affected_urls| who no longer have any
      associated expectations in any expectation files.
    """
    seen_bugs = set()

    expectation_files = self.GetExpectationFilepaths()

    for ef in expectation_files:
      with open(ef) as infile:
        contents = infile.read()
      for url in affected_urls:
        if url in seen_bugs:
          continue
        if url in contents:
          seen_bugs.add(url)

    return set(affected_urls) - seen_bugs

  def GetExpectationFilepaths(self):
    """Gets all the filepaths to expectation files of interest.

    Returns:
      A list of strings, each element being a filepath pointing towards an
      expectation file.
    """
    raise NotImplementedError()
def _WaitForAnyUserInput():
  """Waits for any user input.

  Split out for testing purposes.
  """
  _get_input('Press any key to continue')
def _WaitForUserInputOnModification():
  """Prompts until the user picks how to handle a semi-stale expectation.

  Returns:
    One of the following string values:
      i - Expectation should be ignored and left alone.
      m - Expectation will be manually modified by the user.
      r - Expectation should be removed by the script.
  """
  valid_inputs = ['i', 'm', 'r']
  prompt = ('How should this expectation be handled? (i)gnore/(m)anually '
            'modify/(r)emove: ')
  while True:
    response = _get_input(prompt).lower()
    if response in valid_inputs:
      return response
    print('Invalid input, valid inputs are %s' % (', '.join(valid_inputs)))
def _LineContainsDisableComment(line):
  """Returns True iff |line| contains a finder disable comment."""
  return line.find(FINDER_DISABLE_COMMENT_BASE) != -1
def _LineContainsEnableComment(line):
  """Returns True iff |line| contains a finder enable comment."""
  return line.find(FINDER_ENABLE_COMMENT_BASE) != -1
def _GetFinderCommentSuffix(line):
  """Gets the suffix of the finder comment on the given line.

  Examples:
    'foo # finder:disable' -> ''
    'foo # finder:disable-stale some_reason' -> '-stale'

  NOTE(review): the '' example above would trip the assert below since '' is
  not in ALL_FINDER_SUFFIXES — presumably only suffixed comments appear in
  practice; confirm before relying on bare finder:disable.
  """
  if _LineContainsDisableComment(line):
    base = FINDER_DISABLE_COMMENT_BASE
  elif _LineContainsEnableComment(line):
    base = FINDER_ENABLE_COMMENT_BASE
  else:
    raise RuntimeError('Given line %s did not have a finder comment.' % line)
  # Isolate the whitespace-delimited comment token, then strip the base.
  comment_token = line[line.find(base):].split()[0]
  suffix = comment_token.replace(base, '')
  assert suffix in ALL_FINDER_SUFFIXES
  return suffix
def _LineContainsRelevantDisableComment(line, removal_type):
  """Returns whether the given line contains a relevant disable comment.

  Args:
    line: A string containing the line to check.
    removal_type: A RemovalType enum corresponding to the type of expectations
        being removed.

  Returns:
    A bool denoting whether |line| contains a relevant disable comment given
    |removal_type|.
  """
  relevant_markers = (FINDER_DISABLE_COMMENT_GENERAL,
                      FINDER_DISABLE_COMMENT_BASE + removal_type)
  return any(marker in line for marker in relevant_markers)
def _DisableSuffixIsRelevant(suffix, removal_type):
  """Returns whether the given suffix is relevant given the removal type.

  Args:
    suffix: A string containing a disable comment suffix.
    removal_type: A RemovalType enum corresponding to the type of expectations
        being removed.

  Returns:
    True if suffix is relevant and its disable request should be honored.
  """
  # A general disable always applies; otherwise the suffix must match the
  # removal type being performed.
  return suffix in (FINDER_COMMENT_SUFFIX_GENERAL, removal_type)
def _GetDisableReasonFromComment(line):
  """Returns the free-text reason following the disable comment on |line|."""
  marker = FINDER_DISABLE_COMMENT_BASE + _GetFinderCommentSuffix(line)
  remainder = line.split(marker, 1)[1]
  return remainder.strip()
def _IsCommentOrBlankLine(line):
return (not line or line.startswith('#'))
def _get_input(prompt):
  """Python 2/3-agnostic wrapper around input()/raw_input()."""
  # raw_input only exists (and is only referenced) under Python 2.
  return raw_input(prompt) if sys.version_info[0] == 2 else input(prompt)
| |
import re
from typing import Dict, Optional
from django.contrib import messages
from django.db.models import Case, Count, IntegerField, Prefetch, Q, Value, When
from django.forms.widgets import HiddenInput
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.html import format_html
from django.views.decorators.http import require_GET
from django_comments.models import Comment
from consents.forms import TermBySlugsForm
from consents.models import Consent, TermOption
from dashboard.forms import (
AssignmentForm,
AutoUpdateProfileForm,
SearchForm,
SendHomeworkForm,
)
from fiscal.models import MembershipTask
from workshops.models import (
Airport,
Badge,
Event,
Membership,
Organization,
Person,
Qualification,
Tag,
Task,
TrainingProgress,
TrainingRequest,
)
from workshops.util import admin_required, login_required
# Terms shown on the trainee dashboard and can be updated by the user.
TERM_SLUGS = ["may-contact", "public-profile", "may-publish-name"]
@login_required
def dispatch(request):
    """If user is admin, then show them admin dashboard; otherwise redirect
    them to trainee dashboard."""
    view_name = "admin-dashboard" if request.user.is_admin else "trainee-dashboard"
    return redirect(reverse(view_name))
@admin_required
def admin_dashboard(request):
    """Home page for admins.

    Shows current, unpublished, and metadata-changed events filtered by the
    selected assignee (defaulting to the signed-in admin).
    """
    data = request.GET.copy()
    if "assigned_to" not in data:
        # Default the assignment filter to the signed-in admin.
        data["assigned_to"] = request.user.id
    assignment_form = AssignmentForm(data)
    assigned_to: Optional[Person] = None
    if assignment_form.is_valid():
        assigned_to = assignment_form.cleaned_data["assigned_to"]

    current_events = Event.objects.current_events().prefetch_related("tags")

    # This annotation may produce wrong number of instructors when
    # `unpublished_events` filters out events that contain a specific tag.
    # The bug was fixed in #1130.
    unpublished_events = (
        Event.objects.active()
        .unpublished_events()
        .select_related("host")
        .annotate(
            # Count one per task whose role is "instructor".
            num_instructors=Count(
                Case(
                    When(task__role__name="instructor", then=Value(1)),
                    output_field=IntegerField(),
                )
            ),
        )
        .order_by("-start")
    )

    # assigned events that have unaccepted changes
    updated_metadata = Event.objects.active().filter(metadata_changed=True)

    # Narrow all three querysets to the chosen assignee.
    current_events = current_events.filter(assigned_to=assigned_to)
    unpublished_events = unpublished_events.filter(assigned_to=assigned_to)
    updated_metadata = updated_metadata.filter(assigned_to=assigned_to)

    context = {
        "title": None,
        "assignment_form": assignment_form,
        "assigned_to": assigned_to,
        "current_events": current_events,
        "unpublished_events": unpublished_events,
        "updated_metadata": updated_metadata.count(),
        "main_tags": Tag.objects.main_tags(),
    }
    return render(request, "dashboard/admin_dashboard.html", context)
# ------------------------------------------------------------
# Views for trainees
@login_required
def trainee_dashboard(request):
    """Trainee home page: the user's own profile plus their consent choices."""
    # Prefetch everything the profile template renders to avoid N+1 queries.
    qs = Person.objects.select_related("airport").prefetch_related(
        "badges",
        "lessons",
        "domains",
        "languages",
        Prefetch(
            "task_set",
            queryset=Task.objects.select_related("event", "role"),
        ),
        Prefetch(
            "membershiptask_set",
            queryset=MembershipTask.objects.select_related("membership", "role"),
        ),
    )
    user = get_object_or_404(qs, id=request.user.id)
    consents = (
        Consent.objects.active()
        .filter(
            term__slug__in=TERM_SLUGS,
            person=user,
        )
        .select_related("term", "term_option")
    )
    # Expose each consent under a template-friendly name, e.g.
    # "may-contact" -> context key "may_contact".
    consent_by_term_slug_label: Dict[str, TermOption] = {}
    for consent in consents:
        label = consent.term.slug.replace("-", "_")
        consent_by_term_slug_label[label] = consent.term_option
    context = {"title": "Your profile", "user": user, **consent_by_term_slug_label}
    return render(request, "dashboard/trainee_dashboard.html", context)
@login_required
def autoupdate_profile(request):
    """Lets the logged-in person edit their own profile and consents."""
    person = request.user
    consent_form_kwargs = {
        "initial": {"person": person},
        "widgets": {"person": HiddenInput()},
        "form_tag": False,
        "prefix": "consents",
    }
    form = AutoUpdateProfileForm(
        instance=person, form_tag=False, add_submit_button=False
    )
    consent_form = TermBySlugsForm(term_slugs=TERM_SLUGS, **consent_form_kwargs)

    if request.method == "POST":
        form = AutoUpdateProfileForm(request.POST, instance=person)
        consent_form = TermBySlugsForm(
            request.POST, term_slugs=TERM_SLUGS, **consent_form_kwargs
        )
        # NOTE(review): the form.instance == person check presumably guards
        # against saving a different Person record — confirm intent.
        if form.is_valid() and form.instance == person and consent_form.is_valid():
            # save lessons
            person.lessons.clear()
            for lesson in form.cleaned_data["lessons"]:
                q = Qualification(lesson=lesson, person=person)
                q.save()

            # don't save related lessons
            del form.cleaned_data["lessons"]

            person = form.save()
            # save consents
            consent_form.save()

            messages.success(request, "Your profile was updated.")

            return redirect(reverse("trainee-dashboard"))
        else:
            messages.error(request, "Fix errors below.")

    context = {
        "title": "Update Your Profile",
        "form": form,
        "consents_form": consent_form,
    }
    return render(request, "dashboard/autoupdate_profile.html", context)
def _homework_in_evaluation(progresses, requirement_name):
    """Returns True iff the newest progress record for |requirement_name| in
    |progresses| is still awaiting evaluation (state == "n").

    Args:
        progresses: TrainingProgress queryset already filtered to one trainee.
        requirement_name: Requirement name to look up, e.g. "SWC Homework".
    """
    last_homework = (
        progresses.filter(requirement__name=requirement_name)
        .order_by("-created_at")
        .first()
    )
    return last_homework is not None and last_homework.state == "n"


@login_required
def training_progress(request):
    """Trainee view of their instructor-training progress.

    GET renders the progress page; POST submits a homework link stored as a
    not-yet-evaluated TrainingProgress record.
    """
    homework_form = SendHomeworkForm()

    # Add information about instructor training progress to request.user.
    request.user = (
        Person.objects.annotate_with_instructor_eligibility()
        .prefetch_related(
            Prefetch(
                "badges",
                to_attr="instructor_badges",
                queryset=Badge.objects.instructor_badges(),
            ),
        )
        .get(pk=request.user.pk)
    )

    progresses = request.user.trainingprogress_set.filter(discarded=False)
    # Flag, per curriculum, whether the latest homework submission is still
    # waiting on review (was previously triplicated inline for SWC/DC/LC).
    request.user.swc_homework_in_evaluation = _homework_in_evaluation(
        progresses, "SWC Homework"
    )
    request.user.dc_homework_in_evaluation = _homework_in_evaluation(
        progresses, "DC Homework"
    )
    request.user.lc_homework_in_evaluation = _homework_in_evaluation(
        progresses, "LC Homework"
    )

    if request.method == "POST":
        homework_form = SendHomeworkForm(data=request.POST)
        if homework_form.is_valid():
            # read homework type from POST
            hw_type = homework_form.cleaned_data["requirement"]

            # create "empty" progress object and fill out
            progress = TrainingProgress(
                trainee=request.user,
                state="n",  # not evaluated yet
                requirement=hw_type,
            )

            # create virtual form to validate and save
            form = SendHomeworkForm(data=request.POST, instance=progress)
            if form.is_valid():
                form.save()
                messages.success(
                    request, "Your homework submission will be evaluated soon."
                )

            return redirect(reverse("training-progress"))

    context = {
        "title": "Your training progress",
        "homework_form": homework_form,
    }
    return render(request, "dashboard/training_progress.html", context)
# ------------------------------------------------------------
@require_GET
@admin_required
def search(request):
    """Search the database by term.

    Queries organizations, memberships, events, persons, airports, training
    requests, and comments; redirects straight to the result when exactly one
    record matches (unless no_redirect is set).
    """
    term = ""
    organizations = None
    memberships = None
    events = None
    persons = None
    airports = None
    training_requests = None
    comments = None

    if request.method == "GET" and "term" in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            term = form.cleaned_data.get("term", "").strip()
            tokens = re.split(r"\s+", term)
            results_combined = []

            organizations = list(
                Organization.objects.filter(
                    Q(domain__icontains=term) | Q(fullname__icontains=term)
                ).order_by("fullname")
            )
            results_combined += organizations

            memberships = list(
                Membership.objects.filter(
                    Q(name__icontains=term) | Q(registration_code__icontains=term)
                ).order_by("-agreement_start")
            )
            results_combined += memberships

            events = list(
                Event.objects.filter(
                    Q(slug__icontains=term)
                    | Q(host__domain__icontains=term)
                    | Q(host__fullname__icontains=term)
                    | Q(url__icontains=term)
                    | Q(contact__icontains=term)
                    | Q(venue__icontains=term)
                    | Q(address__icontains=term)
                ).order_by("-slug")
            )
            results_combined += events

            # if user searches for two words, assume they mean a person
            # name
            if len(tokens) == 2:
                name1, name2 = tokens
                # Match names in either order (personal/family swapped).
                complex_q = (
                    (Q(personal__icontains=name1) & Q(family__icontains=name2))
                    | (Q(personal__icontains=name2) & Q(family__icontains=name1))
                    | Q(email__icontains=term)
                    | Q(secondary_email__icontains=term)
                    | Q(github__icontains=term)
                )
                persons = list(Person.objects.filter(complex_q))
            else:
                persons = list(
                    Person.objects.filter(
                        Q(personal__icontains=term)
                        | Q(family__icontains=term)
                        | Q(email__icontains=term)
                        | Q(secondary_email__icontains=term)
                        | Q(github__icontains=term)
                    ).order_by("family")
                )
            results_combined += persons

            airports = list(
                Airport.objects.filter(
                    Q(iata__icontains=term) | Q(fullname__icontains=term)
                ).order_by("iata")
            )
            results_combined += airports

            training_requests = list(
                TrainingRequest.objects.filter(
                    Q(group_name__icontains=term)
                    | Q(family__icontains=term)
                    | Q(email__icontains=term)
                    | Q(github__icontains=term)
                    | Q(affiliation__icontains=term)
                    | Q(location__icontains=term)
                    | Q(user_notes__icontains=term)
                )
            )
            results_combined += training_requests

            comments = list(
                Comment.objects.filter(
                    Q(comment__icontains=term)
                    | Q(user_name__icontains=term)
                    | Q(user_email__icontains=term)
                    | Q(user__personal__icontains=term)
                    | Q(user__family__icontains=term)
                    | Q(user__email__icontains=term)
                    | Q(user__github__icontains=term)
                ).prefetch_related("content_object")
            )
            results_combined += comments

            # only 1 record found? Let's move to it immediately
            if len(results_combined) == 1 and not form.cleaned_data["no_redirect"]:
                result = results_combined[0]
                msg = format_html(
                    "You were moved to this page, because your search <i>{}</i> "
                    "yields only this result.",
                    term,
                )
                if isinstance(result, Comment):
                    messages.success(request, msg)
                    # Jump to the comment anchor on its parent object's page.
                    return redirect(
                        result.content_object.get_absolute_url()
                        + "#c{}".format(result.id)
                    )
                elif hasattr(result, "get_absolute_url"):
                    messages.success(request, msg)
                    return redirect(result.get_absolute_url())

        else:
            messages.error(request, "Fix errors below.")

    # if empty GET, we'll create a blank form
    else:
        form = SearchForm()

    context = {
        "title": "Search",
        "form": form,
        "term": term,
        # NOTE(review): key is deliberately the British spelling
        # "organisations" — the template presumably looks it up under this
        # name; verify before renaming.
        "organisations": organizations,
        "memberships": memberships,
        "events": events,
        "persons": persons,
        "airports": airports,
        "comments": comments,
        "training_requests": training_requests,
    }
    return render(request, "dashboard/search.html", context)
| |
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh, eig
from scipy.sparse.linalg import lobpcg, eigs, eigsh
from sklearn.utils.validation import check_random_state
from .validation import check_array
# Solvers usable by check_eigen_solver()/eigen_decomposition(); 'amg' is
# appended below only when pyamg imports successfully.
EIGEN_SOLVERS = ['auto', 'dense', 'arpack', 'lobpcg']
# Maps unusable solver names to the error message explaining why.
BAD_EIGEN_SOLVERS = {}
# Keyword arguments recognized by the (optional) pyamg-based solver path.
AMG_KWDS = ['strength', 'aggregate', 'smooth', 'max_levels', 'max_coarse']

try:
    from pyamg import smoothed_aggregation_solver
    PYAMG_LOADED = True
    EIGEN_SOLVERS.append('amg')
except ImportError:
    PYAMG_LOADED = False
    BAD_EIGEN_SOLVERS['amg'] = """The eigen_solver was set to 'amg',
but pyamg is not available. Please either
install pyamg or use another method."""
def check_eigen_solver(eigen_solver, solver_kwds, size=None, nvec=None):
    """Validate the requested eigensolver and resolve 'auto' when possible.

    Parameters
    ----------
    eigen_solver : string
        string value to validate
    solver_kwds : dict or None
        extra keyword arguments intended for the solver; reset to None
        whenever a different solver is substituted
    size, nvec : int (optional)
        if both provided, use the specified problem size and number of
        vectors to determine the optimal method to use with
        eigen_solver='auto'

    Returns
    -------
    (eigen_solver, solver_kwds) : tuple
        The solver to use and its keyword arguments. These only differ from
        the inputs when a substitution was made (see above).
    """
    if eigen_solver in BAD_EIGEN_SOLVERS:
        raise ValueError(BAD_EIGEN_SOLVERS[eigen_solver])
    if eigen_solver not in EIGEN_SOLVERS:
        raise ValueError("Unrecognized eigen_solver: '{0}'."
                         "Should be one of: {1}".format(eigen_solver,
                                                        EIGEN_SOLVERS))

    if size is not None and nvec is not None:
        # lobpcg needs size >= 5*nvec + 1 to behave well; otherwise fall back.
        if eigen_solver == 'lobpcg' and size < 5 * nvec + 1:
            warnings.warn("lobpcg does not perform well with small matrices or "
                          "with large numbers of vectors. Switching to 'dense'")
            return 'dense', None
        if eigen_solver == 'auto':
            # Large problem, few vectors: prefer amg (if available) or arpack;
            # everything else is cheapest done densely.
            if size > 200 and nvec < 10:
                return ('amg', None) if PYAMG_LOADED else ('arpack', None)
            return 'dense', None

    return eigen_solver, solver_kwds
def _is_symmetric(M, tol = 1e-8):
if sparse.isspmatrix(M):
conditions = np.abs((M - M.T).data) < tol
else:
conditions = np.abs((M - M.T)) < tol
return(np.all(conditions))
def eigen_decomposition(G, n_components=8, eigen_solver='auto',
                        random_state=None,
                        drop_first=True, largest=True, solver_kwds=None):
    """
    Function to compute the eigendecomposition of a square matrix.
    Parameters
    ----------
    G : array_like or sparse matrix
        The square matrix for which to compute the eigen-decomposition.
    n_components : integer, optional
        The number of eigenvectors to return
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' :
            attempt to choose the best method for input data (default)
        'dense' :
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type.
            This method should be avoided for large problems.
        'arpack' :
            use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is best to
            try several random seeds in order to check results.
        'lobpcg' :
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' :
            Algebraic Multigrid solver (requires ``pyamg`` to be installed)
            It can be faster on very large, sparse problems, but may also lead
            to instabilities.
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.
    drop_first : bool, optional
        If True, one extra eigenpair is computed so that the caller may drop
        the first component and still receive ``n_components`` pairs.
    largest : bool, optional
        If True, return the eigenpairs with the largest eigenvalues;
        otherwise return the ones with the smallest.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
    Returns
    -------
    lambdas, diffusion_map : eigenvalues, eigenvectors
    """
    n_nodes = G.shape[0]
    if drop_first:
        # Request one extra pair so the first one can be discarded upstream.
        n_components = n_components + 1
    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
                                                   size=n_nodes,
                                                   nvec=n_components)
    random_state = check_random_state(random_state)
    # Convert G to best type for eigendecomposition
    if sparse.issparse(G):
        if G.getformat() != 'csr':
            # BUG FIX: compare formats with '!=' ('is' on string literals is
            # unreliable) and keep the result -- tocsr() is not in-place.
            G = G.tocsr()
    # BUG FIX: np.float (alias of builtin float, i.e. float64) was removed
    # in NumPy >= 1.24; use the explicit dtype.
    G = G.astype(np.float64)
    # Check for symmetry
    is_symmetric = _is_symmetric(G)
    # Try Eigen Methods:
    if eigen_solver == 'arpack':
        # This matches the internal initial state used by ARPACK
        v0 = random_state.uniform(-1, 1, G.shape[0])
        if is_symmetric:
            if largest:
                which = 'LM'
            else:
                which = 'SM'
            lambdas, diffusion_map = eigsh(G, k=n_components, which=which,
                                           v0=v0,**(solver_kwds or {}))
        else:
            # Non-symmetric problems use the general solver; 'LR'/'SR' pick
            # by largest/smallest real part.
            if largest:
                which = 'LR'
            else:
                which = 'SR'
            lambdas, diffusion_map = eigs(G, k=n_components, which=which,
                                          **(solver_kwds or {}))
            lambdas = np.real(lambdas)
            diffusion_map = np.real(diffusion_map)
    elif eigen_solver == 'amg':
        # separate amg & lobpcg keywords:
        if solver_kwds is not None:
            amg_kwds = {}
            lobpcg_kwds = solver_kwds.copy()
            for kwd in AMG_KWDS:
                if kwd in solver_kwds.keys():
                    amg_kwds[kwd] = solver_kwds[kwd]
                    del lobpcg_kwds[kwd]
        else:
            amg_kwds = None
            lobpcg_kwds = None
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        if not sparse.issparse(G):
            warnings.warn("AMG works better for sparse matrices")
        # Use AMG to get a preconditioner and speed up the eigenvalue problem.
        ml = smoothed_aggregation_solver(check_array(G, accept_sparse = ['csr']),**(amg_kwds or {}))
        M = ml.aspreconditioner()
        # Ask for a few extra vectors; lobpcg converges better with slack.
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        X[:, 0] = (G.diagonal()).ravel()
        lambdas, diffusion_map = lobpcg(G, X, M=M, largest=largest,**(lobpcg_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == "lobpcg":
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        lambdas, diffusion_map = lobpcg(G, X, largest=largest,**(solver_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == 'dense':
        if sparse.isspmatrix(G):
            G = G.todense()
        if is_symmetric:
            lambdas, diffusion_map = eigh(G,**(solver_kwds or {}))
        else:
            lambdas, diffusion_map = eig(G,**(solver_kwds or {}))
            sort_index = np.argsort(lambdas)
            lambdas = lambdas[sort_index]
            # BUG FIX: the reordered eigenvectors were computed but never
            # assigned, so the vectors no longer matched the sorted values.
            diffusion_map = diffusion_map[:, sort_index]
        if largest:# eigh always returns eigenvalues in ascending order
            lambdas = lambdas[::-1] # reverse order the e-values
            diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    return (lambdas, diffusion_map)
def null_space(M, k, k_skip=1, eigen_solver='arpack',
               random_state=None, solver_kwds=None):
    """
    Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues
    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite
    k : integer
        Number of eigenvalues/vectors to return
    k_skip : integer, optional
        Number of low eigenvalues to skip.
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' :
            algorithm will attempt to choose the best method for input data
        'dense' :
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type. This method should be avoided for large problems.
        'arpack' :
            use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is best to
            try several random seeds in order to check results.
        'lobpcg' :
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' :
            AMG requires pyamg to be installed. It can be faster on very large,
            sparse problems, but may also lead to instabilities.
    random_state: numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
    Returns
    -------
    null_space : estimated k vectors of the null space
    error : estimated error (sum of eigenvalues)
    Notes
    -----
    dense solver key words: see
    http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eigh.html
    for symmetric problems and
    http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig
    for non symmetric problems.
    arpack solver key words: see
    http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigsh.html
    for symmetric problems and http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs
    for non symmetric problems.
    lobpcg solver keywords: see
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html
    amg solver keywords: see
    http://pyamg.googlecode.com/svn/branches/1.0.x/Docs/html/pyamg.aggregation.html#module-pyamg.aggregation.aggregation
    (Note amg solver uses lobpcg and also accepts lobpcg keywords)
    """
    # Validate the solver choice (and auto-select based on problem size).
    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
                                                   size=M.shape[0],
                                                   nvec=k + k_skip)
    random_state = check_random_state(random_state)
    if eigen_solver == 'arpack':
        # This matches the internal initial state used by ARPACK
        v0 = random_state.uniform(-1, 1, M.shape[0])
        try:
            # sigma=0.0 runs shift-invert about zero, targeting the
            # eigenvalues closest to zero (the null space).
            eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                                v0=v0,**(solver_kwds or {}))
        except RuntimeError as msg:
            raise ValueError("Error in determining null-space with ARPACK. "
                             "Error message: '%s'. "
                             "Note that method='arpack' can fail when the "
                             "weight matrix is singular or otherwise "
                             "ill-behaved. method='dense' is recommended. "
                             "See online documentation for more information."
                             % msg)
        # Drop the k_skip lowest pairs; the summed remainder estimates error.
        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'dense':
        if hasattr(M, 'toarray'):
            # Dense LAPACK routines require a plain array.
            M = M.toarray()
        eigen_values, eigen_vectors = eigh(M, eigvals=(0, k+k_skip),overwrite_a=True,
                                           **(solver_kwds or {}))
        index = np.argsort(np.abs(eigen_values))
        eigen_vectors = eigen_vectors[:, index]
        eigen_values = eigen_values[index]
        return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
        # eigen_values, eigen_vectors = eigh(
        #     M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        # index = np.argsort(np.abs(eigen_values))
        # return eigen_vectors[:, index], np.sum(eigen_values)
    elif (eigen_solver == 'amg' or eigen_solver == 'lobpcg'):
        # M should be positive semi-definite. Add 1 to make it pos. def.
        # (the shift is subtracted back out of the eigenvalues below).
        try:
            M = sparse.identity(M.shape[0]) + M
            n_components = min(k + k_skip + 10, M.shape[0])
            eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
                                                              eigen_solver = eigen_solver,
                                                              drop_first = False,
                                                              largest = False,
                                                              random_state=random_state,
                                                              solver_kwds=solver_kwds)
            eigen_values = eigen_values -1
            index = np.argsort(np.abs(eigen_values))
            eigen_values = eigen_values[index]
            eigen_vectors = eigen_vectors[:, index]
            return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
        except np.linalg.LinAlgError: # try again with bigger increase
            warnings.warn("LOBPCG failed the first time. Increasing Pos Def adjustment.")
            # Retry with a shift of 2 instead of 1, subtracted out below.
            M = 2.0*sparse.identity(M.shape[0]) + M
            n_components = min(k + k_skip + 10, M.shape[0])
            eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
                                                              eigen_solver = eigen_solver,
                                                              drop_first = False,
                                                              largest = False,
                                                              random_state=random_state,
                                                              solver_kwds=solver_kwds)
            eigen_values = eigen_values - 2
            index = np.argsort(np.abs(eigen_values))
            eigen_values = eigen_values[index]
            eigen_vectors = eigen_vectors[:, index]
            return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Token persistence service."""
import abc
import copy
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _LW
from keystone.token import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Cache decorator for individual token lookups (config section 'token').
MEMOIZE = cache.get_memoization_decorator(section='token')
# Cache decorator for the revocation list; its expiration comes from the
# separate 'revoke' config section.
REVOCATION_MEMOIZE = cache.get_memoization_decorator(
    section='token', expiration_section='revoke')
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
                     'token_provider_api', 'trust_api')
class PersistenceManager(manager.Manager):
    """Default pivot point for the Token backend.
    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.
    """
    def __init__(self):
        super(PersistenceManager, self).__init__(CONF.token.driver)
    def _assert_valid(self, token_id, token_ref):
        """Raise TokenNotFound if the token is expired."""
        current_time = timeutils.normalize_time(timeutils.utcnow())
        expires = token_ref.get('expires')
        if not expires or current_time > timeutils.normalize_time(expires):
            raise exception.TokenNotFound(token_id=token_id)
    def get_token(self, token_id):
        """Return an unexpired token_ref or raise TokenNotFound."""
        if not token_id:
            # NOTE(morganfainberg): There are cases when the
            # context['token_id'] will in-fact be None. This also saves
            # a round-trip to the backend if we don't have a token_id.
            raise exception.TokenNotFound(token_id='')
        unique_id = utils.generate_unique_id(token_id)
        token_ref = self._get_token(unique_id)
        # NOTE(morganfainberg): Lift expired checking to the manager, there is
        # no reason to make the drivers implement this check. With caching,
        # self._get_token could return an expired token. Make sure we behave
        # as expected and raise TokenNotFound on those instances.
        self._assert_valid(token_id, token_ref)
        return token_ref
    @MEMOIZE
    def _get_token(self, token_id):
        # Only ever use the "unique" id in the cache key.
        return self.driver.get_token(token_id)
    def create_token(self, token_id, data):
        """Persist a token and prime the lookup cache with the result."""
        unique_id = utils.generate_unique_id(token_id)
        # Deep-copy so the caller's dict is not mutated by the id rewrite.
        data_copy = copy.deepcopy(data)
        data_copy['id'] = unique_id
        ret = self.driver.create_token(unique_id, data_copy)
        if MEMOIZE.should_cache(ret):
            # NOTE(morganfainberg): when doing a cache set, you must pass the
            # same arguments through, the same as invalidate (this includes
            # "self"). First argument is always the value to be cached
            self._get_token.set(ret, self, unique_id)
        return ret
    def delete_token(self, token_id):
        """Delete one token and invalidate the related caches.

        No-op when revoke-by-id is disabled in configuration.
        """
        if not CONF.token.revoke_by_id:
            return
        unique_id = utils.generate_unique_id(token_id)
        self.driver.delete_token(unique_id)
        self._invalidate_individual_token_cache(unique_id)
        self.invalidate_revocation_list()
    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Bulk-delete a user's tokens and invalidate their cache entries.

        No-op when revoke-by-id is disabled in configuration.
        """
        if not CONF.token.revoke_by_id:
            return
        token_list = self.driver.delete_tokens(user_id, tenant_id, trust_id,
                                               consumer_id)
        for token_id in token_list:
            unique_id = utils.generate_unique_id(token_id)
            self._invalidate_individual_token_cache(unique_id)
        self.invalidate_revocation_list()
    @REVOCATION_MEMOIZE
    def list_revoked_tokens(self):
        # Cached; expiration is configured via the 'revoke' section (see
        # REVOCATION_MEMOIZE at module level).
        return self.driver.list_revoked_tokens()
    def invalidate_revocation_list(self):
        # NOTE(morganfainberg): Note that ``self`` needs to be passed to
        # invalidate() because of the way the invalidation method works on
        # determining cache-keys.
        self.list_revoked_tokens.invalidate(self)
    def delete_tokens_for_domain(self, domain_id):
        """Delete all tokens for a given domain.
        It will delete all the project-scoped tokens for the projects
        that are owned by the given domain, as well as any tokens issued
        to users that are owned by this domain.
        However, deletion of domain_scoped tokens will still need to be
        implemented as stated in TODO below.
        """
        if not CONF.token.revoke_by_id:
            return
        projects = self.resource_api.list_projects()
        for project in projects:
            if project['domain_id'] == domain_id:
                for user_id in self.assignment_api.list_user_ids_for_project(
                        project['id']):
                    self.delete_tokens_for_user(user_id, project['id'])
        # TODO(morganfainberg): implement deletion of domain_scoped tokens.
        users = self.identity_api.list_users(domain_id)
        user_ids = (user['id'] for user in users)
        self.delete_tokens_for_users(user_ids)
    def delete_tokens_for_user(self, user_id, project_id=None):
        """Delete all tokens for a given user or user-project combination.
        This method adds in the extra logic for handling trust-scoped token
        revocations in a single call instead of needing to explicitly handle
        trusts in the caller's logic.
        """
        if not CONF.token.revoke_by_id:
            return
        self.delete_tokens(user_id, tenant_id=project_id)
        for trust in self.trust_api.list_trusts_for_trustee(user_id):
            # Ensure we revoke tokens associated to the trust / project
            # user_id combination.
            self.delete_tokens(user_id, trust_id=trust['id'],
                               tenant_id=project_id)
        for trust in self.trust_api.list_trusts_for_trustor(user_id):
            # Ensure we revoke tokens associated to the trust / project /
            # user_id combination where the user_id is the trustor.
            # NOTE(morganfainberg): This revocation is a bit coarse, but it
            # covers a number of cases such as disabling of the trustor user,
            # deletion of the trustor user (for any number of reasons). It
            # might make sense to refine this and be more surgical on the
            # deletions (e.g. don't revoke tokens for the trusts when the
            # trustor changes password). For now, to maintain previous
            # functionality, this will continue to be a bit overzealous on
            # revocations.
            self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],
                               tenant_id=project_id)
    def delete_tokens_for_users(self, user_ids, project_id=None):
        """Delete all tokens for a list of user_ids.
        :param user_ids: list of user identifiers
        :param project_id: optional project identifier
        """
        if not CONF.token.revoke_by_id:
            return
        for user_id in user_ids:
            self.delete_tokens_for_user(user_id, project_id=project_id)
    def _invalidate_individual_token_cache(self, token_id):
        # NOTE(morganfainberg): invalidate takes the exact same arguments as
        # the normal method, this means we need to pass "self" in (which gets
        # stripped off).
        # FIXME(morganfainberg): Does this cache actually need to be
        # invalidated? We maintain a cached revocation list, which should be
        # consulted before accepting a token as valid. For now we will
        # do the explicit individual token invalidation.
        self._get_token.invalidate(self, token_id)
        self.token_provider_api.invalidate_individual_token_cache(token_id)
# NOTE(morganfainberg): @dependency.optional() is required here to ensure the
# class-level optional dependency control attribute is populated as empty
# this is because of the override of .__getattr__ and ensures that if the
# optional dependency injector changes attributes, this class doesn't break.
@dependency.optional()
@dependency.requires('token_provider_api')
@dependency.provider('token_api')
class Manager(object):
    """The token_api provider.
    This class is a proxy class to the token_provider_api's persistence
    manager.
    """
    def __init__(self):
        # NOTE(morganfainberg): __init__ is required for dependency processing.
        super(Manager, self).__init__()
    def __getattr__(self, item):
        """Forward calls to the `token_provider_api` persistence manager."""
        # NOTE(morganfainberg): Prevent infinite recursion, raise an
        # AttributeError for 'token_provider_api' ensuring that the dep
        # injection doesn't infinitely try and lookup self.token_provider_api
        # on _process_dependencies. This doesn't need an exception string as
        # it should only ever be hit on instantiation.
        if item == 'token_provider_api':
            raise AttributeError()
        f = getattr(self.token_provider_api._persistence, item)
        LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
                        'utilizing methods on `token_provider_api` and may be '
                        'removed in Kilo.'), item)
        # Cache the resolved attribute on the instance so __getattr__ (and
        # the deprecation warning) only fires once per attribute name.
        setattr(self, item, f)
        return f
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    """Interface description for a Token driver."""
    @abc.abstractmethod
    def get_token(self, token_id):
        """Get a token by id.
        :param token_id: identity of the token
        :type token_id: string
        :returns: token_ref
        :raises: keystone.exception.TokenNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def create_token(self, token_id, data):
        """Create a token by id and data.
        :param token_id: identity of the token
        :type token_id: string
        :param data: dictionary with additional reference information
        ::
            {
                expires=''
                id=token_id,
                user=user_ref,
                tenant=tenant_ref,
                metadata=metadata_ref
            }
        :type data: dict
        :returns: token_ref or None.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_token(self, token_id):
        """Deletes a token by id.
        :param token_id: identity of the token
        :type token_id: string
        :returns: None.
        :raises: keystone.exception.TokenNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
                      consumer_id=None):
        """Deletes tokens by user.
        If the tenant_id is not None, only delete the tokens by user id under
        the specified tenant.
        If the trust_id is not None, it will be used to query tokens and the
        user_id will be ignored.
        If the consumer_id is not None, only delete the tokens by consumer id
        that match the specified consumer id.
        :param user_id: identity of user
        :type user_id: string
        :param tenant_id: identity of the tenant
        :type tenant_id: string
        :param trust_id: identity of the trust
        :type trust_id: string
        :param consumer_id: identity of the consumer
        :type consumer_id: string
        :returns: The tokens that have been deleted.
        :raises: keystone.exception.TokenNotFound
        """
        # NOTE: unlike the other abstract methods, this one ships a default
        # implementation built on _list_tokens() + delete_token(); concrete
        # drivers may call it via super() or override it wholesale.
        if not CONF.token.revoke_by_id:
            return
        token_list = self._list_tokens(user_id,
                                       tenant_id=tenant_id,
                                       trust_id=trust_id,
                                       consumer_id=consumer_id)
        for token in token_list:
            try:
                self.delete_token(token)
            except exception.NotFound:
                # A token already removed by a concurrent delete is fine.
                pass
        return token_list
    @abc.abstractmethod
    def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
                     consumer_id=None):
        """Returns a list of current token_id's for a user
        This is effectively a private method only used by the ``delete_tokens``
        method and should not be called by anything outside of the
        ``token_api`` manager or the token driver itself.
        :param user_id: identity of the user
        :type user_id: string
        :param tenant_id: identity of the tenant
        :type tenant_id: string
        :param trust_id: identity of the trust
        :type trust_id: string
        :param consumer_id: identity of the consumer
        :type consumer_id: string
        :returns: list of token_id's
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def list_revoked_tokens(self):
        """Returns a list of all revoked tokens
        :returns: list of token_id's
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def flush_expired_tokens(self):
        """Archive or delete tokens that have expired.
        """
        raise exception.NotImplemented()  # pragma: no cover
| |
# -*- coding: utf-8 -*-
"""
lantz.drivers.legacy.coherent.innova
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements the drivers for Innova 300 Series gas lasers.
Implementation Notes
--------------------
There are currently 3 drivers implemented Innova300C, ArgonInnova300C
and KryptonInnova300C. The last two only add to the first the
corresponding wavelength selection.
Sources::
- Innova 300C Manual
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from lantz import Q_, Action, Feat, DictFeat
from lantz.errors import InvalidCommand
from lantz.drivers.legacy.serial import SerialDriver
def make_feat(command, **kwargs):
    """Build a Feat whose getter/setter talk to the instrument via *command*.

    Reading issues ``PRINT <command>``; writing issues ``<command>=<value>``.
    Pass ``readonly=True`` for a get-only Feat or ``writeonly=True`` for a
    set-only one; all remaining keyword arguments are forwarded to Feat.
    """
    def getter(self):
        return self.query('PRINT {}'.format(command))

    def setter(self, value):
        return self.query('{}={}'.format(command, value))

    if kwargs.pop('readonly', None):
        return Feat(fget=getter, **kwargs)
    if kwargs.pop('writeonly', None):
        return Feat(fset=setter, **kwargs)
    return Feat(getter, setter, **kwargs)
class Innova300C(SerialDriver):
    """Innova300 C Series.

    Serial-protocol driver: values are read with ``PRINT <CMD>`` queries and
    written with ``<CMD>=<value>`` commands (see ``make_feat`` above).
    """
    ENCODING = 'ascii'
    SEND_TERMINATION = '\r\n'
    RECV_TERMINATION = '\r\n'
    def __init__(self, port=1, baudrate=1200, **kwargs):
        # Serial frame used by the instrument: 8 data bits, no parity,
        # 1 stop bit.
        super().__init__(port, baudrate, bytesize=8, parity='None',
                         stopbits=1, **kwargs)
    def initialize(self):
        """Open the connection and disable command echo."""
        super().initialize()
        # Echo is turned off because query() does not strip echoed commands
        # (see the TODO there).
        self.echo_enabled = False
    def query(self, command, *, send_args=(None, None), recv_args=(None, None)):
        """Send query to the laser and return the answer, after handling
        possible errors.
        :param command: command to be sent to the instrument
        :type command: string
        :param send_args: (termination, encoding) to override class defaults
        :param recv_args: (termination, encoding) to override class defaults
        """
        ans = super().query(command, send_args=send_args, recv_args=recv_args)
        # TODO: Echo handling
        # Map the instrument's textual error replies onto Python exceptions.
        if ans == 'Out of Range':
            raise ValueError()
        elif ans.startswith('Syntax Error'):
            raise InvalidCommand()
        elif ans == 'Laser must be off':
            raise Exception('Laser must be off')
        return ans
    # General information and communication
    idn = make_feat('ID',
                    readonly=True,
                    doc='Laser identification, should be I300.',
                    read_once=True)
    software_rev = make_feat('SOFTWARE',
                             readonly=True,
                             doc='Software revision level in the power supply.',
                             read_once=True)
    head_software_rev = make_feat('HEAD SOFTWARE',
                                  readonly=True,
                                  doc='Software revision level in the laser head board.',
                                  read_once=True)
    echo_enabled = make_feat('ECHO',
                             writeonly=True,
                             doc='Echo mode of the serial interface.',
                             values={True: 1, False: 0})
    baudrate = Feat(values={110, 300, 1200, 2400, 4800, 9600, 19200})
    @baudrate.setter
    def baudrate(self, value):
        """RS-232/422 baud rate, the serial connection will be reset after.
        """
        self.query('BAUDRATE={}'.format(value))
        #TODO: RESET Connection
    # Interface
    analog_relative = make_feat('ANALOG MODE',
                                doc='Analog Interface input mode.',
                                values={True: 1, False: 0})
    analog_enabled = make_feat('ANALOGINT',
                               doc='Analog Interface input state.',
                               values={True: 1, False: 0})
    current_range = make_feat('CURRENT RANGE',
                              doc='Current corresponding to 5 Volts at the input'\
                                  ' or output lines of the Analog Interface.',
                              units='A',
                              limits=(10, 100, 1))
    control_pin_high = make_feat('CONTROL',
                                 readonly=True,
                                 doc='State of the input pin 10 of the Analog Interface.',
                                 values={True: 1, False: 0})
    output_pin_high = make_feat('STATUS',
                                doc='State of the output pin 24 and 25 of the Analog Interface.',
                                values={(False, False): 0, (True, False): 1,
                                        (False, True): 2, (True, True): 3})
    # Diagnostics
    @Feat()
    def faults(self):
        """List of all active faults.
        """
        # The instrument reports faults as a single '&'-separated string.
        return self.query('PRINT FAULT').split('&')
    autofill_delta = make_feat('AUTOFILL DELTA',
                               readonly=True,
                               doc='Tube voltage minus the autofill setting.',
                               units='V')
    autofill_needed = make_feat('AUTOFILL STATUS',
                                readonly=True,
                                doc='Is the autofill needed (wheter fill is enabled or not)',
                                values={True: 1, False: 0})
    remaining_time = make_feat('HRSTILSHUTDOWN',
                               readonly=True,
                               doc='Number of hours remaining before the laser '\
                                   'will shut down automatically.',
                               units='hour')
    cathode_current = make_feat('CATHODE CURRENT',
                                readonly=True,
                                doc='Laser cathode current (AC).',
                                units='A')
    cathode_voltage = make_feat('CATHODE VOLTAGE',
                                readonly=True,
                                doc='Laser cathode voltage (AC).',
                                units='V')
    time_to_start = make_feat('START',
                              readonly=True,
                              doc='Timer countdown during the start delay cycle.',
                              units='second')
    @Feat()
    def is_in_start_delay(self):
        """Laser is in start delay (tube not ionized)
        """
        return self.query('LASER') == '1'
    tube_time = make_feat('HOURS',
                          readonly=True,
                          doc='Number of operating hours on the plasma tube.',
                          units='hour')
    tube_voltage = make_feat('TUBE VOLTAGE',
                             readonly=True,
                             doc='Laser tube voltage.',
                             units='V')
    water_flow = make_feat('FLOW',
                           readonly=True,
                           doc='Water flow.',
                           units='gallons/minute')
    water_resistivity = make_feat('WATER RESISTIVITY',
                                  readonly=True,
                                  doc='Resistivity of the incoming water to the power supply.',
                                  units='kohm*cm')
    water_temperature = make_feat('WATER TEMPERATURE',
                                  doc='Temperature of the incoming water to the power supply.')
    # Other
    autofill_mode = make_feat('AUTOFILL',
                              doc='Autofill mode.',
                              values={'disabled': 0, 'enabled': 1,
                                      'enabled until next autofill': 2})
    laser_enabled = make_feat('LASER',
                              doc='Energize the power supply.',
                              values={True: 2, False: 0})
    magnet_current = make_feat('MAGNET CURRENT',
                               readonly=True,
                               doc='Laser magnet current.',
                               units='A')
    operating_mode = make_feat('MODE',
                               readonly=True,
                               doc='Laser operating mode.',
                               values={'current regulation': 0,
                                       'reduced bandwidth light regulation': 1,
                                       'standard light regulation': 2,
                                       'current regulation, light regulation out of range': 3})
    # Etalon
    etalon_mode = make_feat('EMODE',
                            doc='Etalon mode.',
                            values={'manual': 0, 'modetrack': 1, 'modetune': 2})
    etalon_temperature = make_feat('ETALON',
                                   readonly=True,
                                   doc='Etalon temperature.',
                                   units='degC')
    @Feat(units='degC', limits=(51.5, 54, 0.001))
    def etalon_temperature_setpoint(self):
        """Setpoint for the etalon temperature.
        """
        return self.query('PRINT SET ETALON')
    @etalon_temperature_setpoint.setter
    def etalon_temperature_setpoint(self, value):
        self.query('ETALON={}'.format(value))
    # Magnetic field
    magnetic_field_high = make_feat('FIELD',
                                    doc='Magnetic field.',
                                    values={True: 1, False: 0})
    @Feat(values={True: 1, False: 0})
    def magnetic_field_setpoint_high(self):
        """Setpoint for magnetic field setting.
        """
        return self.query('PRINT SET FIELD')
    @magnetic_field_setpoint_high.setter
    def magnetic_field_setpoint_high(self, value):
        self.query('FIELD={}'.format(value))
    # Light and current regulation
    powertrack_mode_enabled = make_feat('PT',
                                        doc='PowerTrack.',
                                        values={True: 1, False: 0})
    @DictFeat(keys=('A', 'B'), limits=(0, 255))
    def powertrack_position(self, key):
        """Relative position of the PowerTrack solenoids.
        """
        return self.query('PRINT PTDAC{}'.format(key))
    @powertrack_position.setter
    def powertrack_position(self, key, value):
        self.query('PTDAC{}={}'.format(key, value))
    @Action()
    def recalibrate_powertrack(self):
        """Recalibrate PowerTrack. This will only execute if PowerTrack is on
        and light regulation is off
        """
        self.query('PT=2')
    @Action()
    def center_powertrack(self):
        """Center PowerTrack and turn it off.
        """
        self.query('PT=3')
    current = make_feat('CURRENT',
                        readonly=True,
                        doc='Current regulation mode.',
                        units='A')
    @Feat(units='A', limits=(0, 50, 0.01))
    def current_setpoint(self):
        """Current setpoint when using the current regulation mode.
        """
        return self.query('PRINT SET CURRENT')
    @current_setpoint.setter
    def current_setpoint(self, value):
        self.query('CURRENT={}'.format(value))
    power = make_feat('LIGHT 3',
                      readonly=True,
                      doc='Current power output.',
                      units='A')
    @Feat(units='W', limits=(0, 50, 0.0001))
    def power_setpoint(self):
        """Setpoint for the light regulation.
        """
        return self.query('PRINT SET LIGHT')
    @power_setpoint.setter
    def power_setpoint(self, value):
        self.query('LIGHT={}'.format(value))
    auto_light_cal_enabled = make_feat('AUTOLTCAL',
                                       doc='Automatic light regulation calibration flag.',
                                       values={True: 1, False: 0})
    current_change_limit = make_feat('PCTCHGTILRECAL',
                                     doc='Percent tube change before an automatic '\
                                         'light regulation recalibration becomes '\
                                         'necessary.',
                                     units='', #TODO: %
                                     limits=(5, 100, 1))
class ArgonInnova300C(Innova300C):
    """Argon Innova 300C.

    Extends the base driver with argon wavelength selection for the
    internal power meter calibration (numeric values plus 'ML*' codes).
    """
    wavelength = make_feat('WAVELENGTH',
                           doc='Wavelength for the internal power meter calibration',
                           values={351, 364, 454, 457, 465, 472, 476, 488, 496, 501,
                                   514, 528, 1090, 'MLVS', 'MLUV', 'MLDUV'})
class KryptonInnova300C(Innova300C):
    """Krypton Innova 300C.

    Extends the base driver with krypton wavelength selection for the
    internal power meter calibration (numeric values plus 'ML*' codes).
    """
    wavelength = make_feat('WAVELENGTH',
                           doc='Wavelength for the internal power meter calibration',
                           values={476, 482, 520, 530, 568, 647, 676, 752, 'MLVS',
                                   'MLUV', 'MLVI', 'MLBG', 'MLRD', 'MLIR'})
if __name__ == '__main__':
    # Manual smoke test: print identification info or launch the interactive
    # test GUI against a real instrument.
    import argparse
    import lantz.log
    # BUG FIX: the description said 'Test Kentech HRI' -- a copy-paste
    # leftover from an unrelated driver; this module drives the Innova 300C.
    parser = argparse.ArgumentParser(description='Test Coherent Innova 300C')
    parser.add_argument('-i', '--interactive', action='store_true',
                        default=False, help='Show interactive GUI')
    parser.add_argument('-p', '--port', type=str, default='17',
                        help='Serial port to connect to')
    args = parser.parse_args()
    lantz.log.log_to_socket(lantz.log.DEBUG)
    with Innova300C(args.port) as inst:
        if args.interactive:
            from lantz.ui.app import start_test_app
            start_test_app(inst)
        else:
            print(inst.idn)
            print(inst.software_rev)
            print(inst.head_software_rev)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for instances, and floating ips."""
import datetime
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
# Configuration options defining the default per-project quota limits and
# the reservation/refresh behaviour of the quota engine.
quota_opts = [
    cfg.IntOpt('quota_instances',
               default=10,
               help='number of instances allowed per project'),
    cfg.IntOpt('quota_cores',
               default=20,
               help='number of instance cores allowed per project'),
    cfg.IntOpt('quota_ram',
               default=50 * 1024,
               help='megabytes of instance ram allowed per project'),
    cfg.IntOpt('quota_floating_ips',
               default=10,
               help='number of floating ips allowed per project'),
    cfg.IntOpt('quota_metadata_items',
               default=128,
               help='number of metadata items allowed per instance'),
    cfg.IntOpt('quota_injected_files',
               default=5,
               help='number of injected files allowed'),
    cfg.IntOpt('quota_injected_file_content_bytes',
               default=10 * 1024,
               help='number of bytes allowed per injected file'),
    cfg.IntOpt('quota_injected_file_path_bytes',
               default=255,
               help='number of bytes allowed per injected file path'),
    cfg.IntOpt('quota_security_groups',
               default=10,
               help='number of security groups per project'),
    cfg.IntOpt('quota_security_group_rules',
               default=20,
               help='number of security rules per security group'),
    cfg.IntOpt('quota_key_pairs',
               default=100,
               help='number of key pairs per user'),
    cfg.IntOpt('reservation_expire',
               default=86400,
               help='number of seconds until a reservation expires'),
    cfg.IntOpt('until_refresh',
               default=0,
               help='count of reservations until usage is refreshed'),
    cfg.IntOpt('max_age',
               default=0,
               help='number of seconds between subsequent usage refreshes'),
    cfg.StrOpt('quota_driver',
               default='nova.quota.DbQuotaDriver',
               help='default driver to use for quota checks'),
    ]
CONF = cfg.CONF
# Make the options above available on the global config object.
CONF.register_opts(quota_opts)
class DbQuotaDriver(object):
    """
    Driver to perform necessary checks to enforce quotas and obtain
    quota information. The default driver utilizes the local
    database.
    """
    def get_by_project(self, context, project_id, resource):
        """Get a specific quota by project."""
        return db.quota_get(context, project_id, resource)
    def get_by_class(self, context, quota_class, resource):
        """Get a specific quota by quota class."""
        return db.quota_class_get(context, quota_class, resource)
    def get_defaults(self, context, resources):
        """Given a list of resources, retrieve the default quotas.
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        """
        quotas = {}
        for resource in resources.values():
            # resource.default resolves the configured flag, or -1
            # (unlimited) when the resource has no associated flag.
            quotas[resource.name] = resource.default
        return quotas
    def get_class_quotas(self, context, resources, quota_class,
                         defaults=True):
        """
        Given a list of resources, retrieve the quotas for the given
        quota class.
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param quota_class: The name of the quota class to return
                            quotas for.
        :param defaults: If True, the default value will be reported
                         if there is no specific value for the
                         resource.
        """
        quotas = {}
        class_quotas = db.quota_class_get_all_by_name(context, quota_class)
        for resource in resources.values():
            # Skip resources without a class-level override unless the
            # caller asked for defaults to be filled in.
            if defaults or resource.name in class_quotas:
                quotas[resource.name] = class_quotas.get(resource.name,
                                                         resource.default)
        return quotas
    def get_project_quotas(self, context, resources, project_id,
                           quota_class=None, defaults=True,
                           usages=True):
        """
        Given a list of resources, retrieve the quotas for the given
        project.
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param project_id: The ID of the project to return quotas for.
        :param quota_class: If project_id != context.project_id, the
                            quota class cannot be determined. This
                            parameter allows it to be specified. It
                            will be ignored if project_id ==
                            context.project_id.
        :param defaults: If True, the quota class value (or the
                         default value, if there is no value from the
                         quota class) will be reported if there is no
                         specific value for the resource.
        :param usages: If True, the current in_use and reserved counts
                       will also be returned.
        """
        quotas = {}
        project_quotas = db.quota_get_all_by_project(context, project_id)
        if usages:
            # Only fetched when needed; also only referenced below when
            # usages is True, so this conditional assignment is safe.
            project_usages = db.quota_usage_get_all_by_project(context,
                                                               project_id)
        # Get the quotas for the appropriate class. If the project ID
        # matches the one in the context, we use the quota_class from
        # the context, otherwise, we use the provided quota_class (if
        # any)
        if project_id == context.project_id:
            quota_class = context.quota_class
        if quota_class:
            class_quotas = db.quota_class_get_all_by_name(context, quota_class)
        else:
            class_quotas = {}
        for resource in resources.values():
            # Omit default/quota class values
            if not defaults and resource.name not in project_quotas:
                continue
            # Resolution order: project override, then quota-class
            # override, then the configured default.
            quotas[resource.name] = dict(
                limit=project_quotas.get(resource.name, class_quotas.get(
                    resource.name, resource.default)),
                )
            # Include usages if desired. This is optional because one
            # internal consumer of this interface wants to access the
            # usages directly from inside a transaction.
            if usages:
                usage = project_usages.get(resource.name, {})
                quotas[resource.name].update(
                    in_use=usage.get('in_use', 0),
                    reserved=usage.get('reserved', 0),
                    )
        return quotas
    def _get_quotas(self, context, resources, keys, has_sync, project_id=None):
        """
        A helper method which retrieves the quotas for the specific
        resources identified by keys, and which apply to the current
        context.
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param keys: A list of the desired quotas to retrieve.
        :param has_sync: If True, indicates that the resource must
                         have a sync attribute; if False, indicates
                         that the resource must NOT have a sync
                         attribute.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # Filter resources
        # The 'sync' attribute distinguishes reservable resources
        # (which carry a usage-sync callable) from simple limits.
        if has_sync:
            sync_filt = lambda x: hasattr(x, 'sync')
        else:
            sync_filt = lambda x: not hasattr(x, 'sync')
        desired = set(keys)
        sub_resources = dict((k, v) for k, v in resources.items()
                             if k in desired and sync_filt(v))
        # Make sure we accounted for all of them...
        # A key that is unregistered OR of the wrong kind (sync vs.
        # non-sync) both surface as QuotaResourceUnknown here.
        if len(keys) != len(sub_resources):
            unknown = desired - set(sub_resources.keys())
            raise exception.QuotaResourceUnknown(unknown=sorted(unknown))
        # Grab and return the quotas (without usages)
        quotas = self.get_project_quotas(context, sub_resources,
                                         project_id,
                                         context.quota_class, usages=False)
        # Flatten {name: {'limit': n}} into {name: n}.
        return dict((k, v['limit']) for k, v in quotas.items())
    def limit_check(self, context, resources, values, project_id=None):
        """Check simple quota limits.
        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction.
        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a simple limit
        resource.
        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns
        nothing.
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param values: A dictionary of the values to check against the
                       quota.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # Ensure no value is less than zero
        unders = [key for key, val in values.items() if val < 0]
        if unders:
            raise exception.InvalidQuotaValue(unders=sorted(unders))
        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id
        # Get the applicable quotas
        quotas = self._get_quotas(context, resources, values.keys(),
                                  has_sync=False, project_id=project_id)
        # Check the quotas and construct a list of the resources that
        # would be put over limit by the desired values
        # Negative quotas (-1) mean unlimited and are never "over".
        overs = [key for key, val in values.items()
                 if quotas[key] >= 0 and quotas[key] < val]
        if overs:
            raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                      usages={})
    def reserve(self, context, resources, deltas, expire=None,
                project_id=None):
        """Check quotas and reserve resources.
        For counting quotas--those quotas for which there is a usage
        synchronization function--this method checks quotas against
        current usage and the desired deltas.
        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it does not have a usage
        synchronization function.
        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns a
        list of reservation UUIDs which were created.
        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param deltas: A dictionary of the proposed delta changes.
        :param expire: An optional parameter specifying an expiration
                       time for the reservations. If it is a simple
                       number, it is interpreted as a number of
                       seconds and added to the current time; if it is
                       a datetime.timedelta object, it will also be
                       added to the current time. A datetime.datetime
                       object will be interpreted as the absolute
                       expiration time. If None is specified, the
                       default expiration time set by
                       --default-reservation-expire will be used (this
                       value will be treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # Set up the reservation expiration
        if expire is None:
            expire = CONF.reservation_expire
        # NOTE: 'long' exists on Python 2 only; this module predates
        # the Python 3 port.
        if isinstance(expire, (int, long)):
            expire = datetime.timedelta(seconds=expire)
        if isinstance(expire, datetime.timedelta):
            expire = timeutils.utcnow() + expire
        if not isinstance(expire, datetime.datetime):
            raise exception.InvalidReservationExpiration(expire=expire)
        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id
        # Get the applicable quotas.
        # NOTE(Vek): We're not worried about races at this point.
        # Yes, the admin may be in the process of reducing
        # quotas, but that's a pretty rare thing.
        quotas = self._get_quotas(context, resources, deltas.keys(),
                                  has_sync=True, project_id=project_id)
        # NOTE(Vek): Most of the work here has to be done in the DB
        # API, because we have to do it in a transaction,
        # which means access to the session. Since the
        # session isn't available outside the DBAPI, we
        # have to do the work there.
        return db.quota_reserve(context, resources, quotas, deltas, expire,
                                CONF.until_refresh, CONF.max_age,
                                project_id=project_id)
    def commit(self, context, reservations, project_id=None):
        """Commit reservations.
        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id
        db.reservation_commit(context, reservations, project_id=project_id)
    def rollback(self, context, reservations, project_id=None):
        """Roll back reservations.
        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id
        db.reservation_rollback(context, reservations, project_id=project_id)
    def usage_reset(self, context, resources):
        """
        Reset the usage records for a particular user on a list of
        resources. This will force that user's usage records to be
        refreshed the next time a reservation is made.
        Note: this does not affect the currently outstanding
        reservations the user has; those reservations must be
        committed or rolled back (or expired).
        :param context: The request context, for access checks.
        :param resources: A list of the resource names for which the
                          usage must be reset.
        """
        # We need an elevated context for the calls to
        # quota_usage_update()
        elevated = context.elevated()
        for resource in resources:
            try:
                # Reset the usage to -1, which will force it to be
                # refreshed
                db.quota_usage_update(elevated, context.project_id,
                                      resource, in_use=-1)
            except exception.QuotaUsageNotFound:
                # That means it'll be refreshed anyway
                pass
    def destroy_all_by_project(self, context, project_id):
        """
        Destroy all quotas, usages, and reservations associated with a
        project.
        :param context: The request context, for access checks.
        :param project_id: The ID of the project being deleted.
        """
        db.quota_destroy_all_by_project(context, project_id)
    def expire(self, context):
        """Expire reservations.
        Explores all currently existing reservations and rolls back
        any that have expired.
        :param context: The request context, for access checks.
        """
        db.reservation_expire(context)
class NoopQuotaDriver(object):
    """Quota driver that never enforces anything.

    Every quota value reported by this driver is -1 (unlimited), and
    every check/reservation operation is a no-op.  This can be used if
    you do not wish to have any quota checking.  For instance, with
    nova compute cells, the parent cell should do quota checking, but
    the child cell should not.
    """
    def get_by_project(self, context, project_id, resource):
        """Get a specific quota by project (always -1, unlimited)."""
        return -1
    def get_by_class(self, context, quota_class, resource):
        """Get a specific quota by quota class (always -1, unlimited)."""
        return -1
    def get_defaults(self, context, resources):
        """Report the default quota for every registered resource.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :returns: A dict mapping every resource name to -1 (unlimited).
        """
        return dict((resource.name, -1) for resource in resources.values())
    def get_class_quotas(self, context, resources, quota_class,
                         defaults=True):
        """Report the quotas for the given quota class.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param quota_class: The name of the quota class to return
                            quotas for (ignored by this driver).
        :param defaults: Ignored; every resource is reported as -1.
        :returns: A dict mapping every resource name to -1 (unlimited).
        """
        return dict((resource.name, -1) for resource in resources.values())
    def get_project_quotas(self, context, resources, project_id,
                           quota_class=None, defaults=True,
                           usages=True):
        """Report the quotas for the given project.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param project_id: The ID of the project (ignored by this
                           driver).
        :param quota_class: Ignored by this driver.
        :param defaults: Ignored; every resource is reported as -1.
        :param usages: Ignored; no usage data is tracked.
        :returns: A dict mapping every resource name to -1 (unlimited).
        """
        return dict((resource.name, -1) for resource in resources.values())
    def limit_check(self, context, resources, values):
        """Check simple quota limits -- a no-op for this driver.

        With the database driver this would raise QuotaResourceUnknown
        for unknown resources and OverQuota for limit violations; here
        every check passes silently.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param values: A dictionary of the values to check against the
                       quota.
        """
    def reserve(self, context, resources, deltas, expire=None):
        """Check quotas and reserve resources -- a no-op for this driver.

        No reservations are ever created, so an empty list of
        reservation UUIDs is returned.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param deltas: A dictionary of the proposed delta changes.
        :param expire: Expiration specifier (ignored by this driver).
        :returns: An empty list.
        """
        return []
    def commit(self, context, reservations):
        """Commit reservations -- a no-op for this driver.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        """
    def rollback(self, context, reservations):
        """Roll back reservations -- a no-op for this driver.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        """
    def usage_reset(self, context, resources):
        """Reset usage records -- a no-op for this driver.

        :param context: The request context, for access checks.
        :param resources: A list of the resource names for which the
                          usage must be reset.
        """
    def destroy_all_by_project(self, context, project_id):
        """Destroy all quota data for a project -- a no-op here.

        :param context: The request context, for access checks.
        :param project_id: The ID of the project being deleted.
        """
    def expire(self, context):
        """Expire reservations -- a no-op for this driver.

        :param context: The request context, for access checks.
        """
class BaseResource(object):
    """A single named resource subject to quota checking."""
    def __init__(self, name, flag=None):
        """Initializes a Resource.

        :param name: The name of the resource, i.e., "instances".
        :param flag: The name of the flag or configuration option
                     which specifies the default value of the quota
                     for this resource.
        """
        self.name = name
        self.flag = flag
    def quota(self, driver, context, **kwargs):
        """Resolve the effective quota for this resource.

        Resolution order: the project-specific quota, then the
        quota-class quota, then the configured default.

        :param driver: A quota driver.
        :param context: The request context.
        :param project_id: The project to obtain the quota value for.
                           If not provided, it is taken from the
                           context.  If it is given as None, no
                           project-specific quota will be searched
                           for.
        :param quota_class: The quota class corresponding to the
                            project, or for which the quota is to be
                            looked up.  If not provided, it is taken
                            from the context.  If it is given as None,
                            no quota class-specific quota will be
                            searched for.  Note that the quota class
                            defaults to the value in the context,
                            which may not correspond to the project if
                            project_id is not the same as the one in
                            the context.
        """
        # Explicit kwargs win over the values carried by the context.
        if 'project_id' in kwargs:
            project_id = kwargs['project_id']
        else:
            project_id = context.project_id
        if 'quota_class' in kwargs:
            quota_class = kwargs['quota_class']
        else:
            quota_class = context.quota_class
        # First preference: a project-specific quota.
        if project_id:
            try:
                return driver.get_by_project(context, project_id, self.name)
            except exception.ProjectQuotaNotFound:
                pass
        # Second preference: a quota-class quota.
        if quota_class:
            try:
                return driver.get_by_class(context, quota_class, self.name)
            except exception.QuotaClassNotFound:
                pass
        # Fall back to the configured default.
        return self.default
    @property
    def default(self):
        """Return the default value of the quota (-1 when no flag)."""
        if self.flag:
            return CONF[self.flag]
        return -1
class ReservableResource(BaseResource):
    """A quota resource whose usage is tracked via reservations."""
    def __init__(self, name, sync, flag=None):
        """Initializes a ReservableResource.

        Reservable resources map directly onto database objects --
        instances, cores, etc.  Each one must be constructed with a
        usage synchronization function, which is called to recompute
        the current counts of one or more resources.

        The synchronization function is passed three arguments: an
        admin context, the project ID, and an opaque session object,
        which should in turn be passed to the underlying database
        function.  It must return a dictionary mapping resource names
        to their current in_use counts; more than one resource and
        resource count may be returned, and a single function may be
        shared by several ReservableResource instances.

        :param name: The name of the resource, i.e., "instances".
        :param sync: A callable which returns a dictionary to
                     resynchronize the in_use count for one or more
                     resources, as described above.
        :param flag: The name of the flag or configuration option
                     which specifies the default value of the quota
                     for this resource.
        """
        super(ReservableResource, self).__init__(name, flag=flag)
        self.sync = sync
class AbsoluteResource(BaseResource):
    """A non-reservable resource: its limit is checked, never reserved."""
class CountableResource(AbsoluteResource):
    """A resource whose count is not derived solely from the project ID."""
    def __init__(self, name, count, flag=None):
        """Initializes a CountableResource.

        Countable resources correspond directly to database objects
        (instances, cores, etc.) for which counting by project ID
        alone is inappropriate -- e.g. rules per security group or key
        pairs per user.  Each one must be constructed with a counting
        function, which is called to determine the current count of
        the resource.

        The counting function is passed the context, along with the
        extra positional and keyword arguments that are passed to
        Quota.count(), and must return an integer specifying the
        count.

        Note that this counting is not performed in a transaction-safe
        manner.  This resource class is a temporary measure to provide
        required functionality, until a better approach to solving
        this problem can be evolved.

        :param name: The name of the resource, i.e., "instances".
        :param count: A callable which returns the count of the
                      resource.  The arguments passed are as described
                      above.
        :param flag: The name of the flag or configuration option
                     which specifies the default value of the quota
                     for this resource.
        """
        super(CountableResource, self).__init__(name, flag=flag)
        self.count = count
class QuotaEngine(object):
    """Represent the set of recognized quotas."""
    def __init__(self, quota_driver_class=None):
        """Initialize a Quota object."""
        if not quota_driver_class:
            quota_driver_class = CONF.quota_driver
        # NOTE: 'basestring' is Python 2 only.  A string names the
        # driver class to import; any other object is used directly.
        if isinstance(quota_driver_class, basestring):
            quota_driver_class = importutils.import_object(quota_driver_class)
        self._resources = {}
        self._driver = quota_driver_class
    def __contains__(self, resource):
        # Supports "name in QUOTAS" membership tests.
        return resource in self._resources
    def register_resource(self, resource):
        """Register a resource."""
        # Re-registering a name silently replaces the prior resource.
        self._resources[resource.name] = resource
    def register_resources(self, resources):
        """Register a list of resources."""
        for resource in resources:
            self.register_resource(resource)
    def get_by_project(self, context, project_id, resource):
        """Get a specific quota by project."""
        return self._driver.get_by_project(context, project_id, resource)
    def get_by_class(self, context, quota_class, resource):
        """Get a specific quota by quota class."""
        return self._driver.get_by_class(context, quota_class, resource)
    def get_defaults(self, context):
        """Retrieve the default quotas.
        :param context: The request context, for access checks.
        """
        return self._driver.get_defaults(context, self._resources)
    def get_class_quotas(self, context, quota_class, defaults=True):
        """Retrieve the quotas for the given quota class.
        :param context: The request context, for access checks.
        :param quota_class: The name of the quota class to return
                            quotas for.
        :param defaults: If True, the default value will be reported
                         if there is no specific value for the
                         resource.
        """
        return self._driver.get_class_quotas(context, self._resources,
                                             quota_class, defaults=defaults)
    def get_project_quotas(self, context, project_id, quota_class=None,
                           defaults=True, usages=True):
        """Retrieve the quotas for the given project.
        :param context: The request context, for access checks.
        :param project_id: The ID of the project to return quotas for.
        :param quota_class: If project_id != context.project_id, the
                            quota class cannot be determined. This
                            parameter allows it to be specified.
        :param defaults: If True, the quota class value (or the
                         default value, if there is no value from the
                         quota class) will be reported if there is no
                         specific value for the resource.
        :param usages: If True, the current in_use and reserved counts
                       will also be returned.
        """
        return self._driver.get_project_quotas(context, self._resources,
                                               project_id,
                                               quota_class=quota_class,
                                               defaults=defaults,
                                               usages=usages)
    def count(self, context, resource, *args, **kwargs):
        """Count a resource.
        For countable resources, invokes the count() function and
        returns its result. Arguments following the context and
        resource are passed directly to the count function declared by
        the resource.
        :param context: The request context, for access checks.
        :param resource: The name of the resource, as a string.
        """
        # Get the resource
        # Only CountableResource instances carry a 'count' attribute.
        res = self._resources.get(resource)
        if not res or not hasattr(res, 'count'):
            raise exception.QuotaResourceUnknown(unknown=[resource])
        return res.count(context, *args, **kwargs)
    def limit_check(self, context, project_id=None, **values):
        """Check simple quota limits.
        For limits--those quotas for which there is no usage
        synchronization function--this method checks that a set of
        proposed values are permitted by the limit restriction. The
        values to check are given as keyword arguments, where the key
        identifies the specific quota limit to check, and the value is
        the proposed value.
        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it is not a simple limit
        resource.
        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns
        nothing.
        :param context: The request context, for access checks.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        return self._driver.limit_check(context, self._resources, values,
                                        project_id=project_id)
    def reserve(self, context, expire=None, project_id=None, **deltas):
        """Check quotas and reserve resources.
        For counting quotas--those quotas for which there is a usage
        synchronization function--this method checks quotas against
        current usage and the desired deltas. The deltas are given as
        keyword arguments, and current usage and other reservations
        are factored into the quota check.
        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it does not have a usage
        synchronization function.
        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high. Otherwise, the method returns a
        list of reservation UUIDs which were created.
        :param context: The request context, for access checks.
        :param expire: An optional parameter specifying an expiration
                       time for the reservations. If it is a simple
                       number, it is interpreted as a number of
                       seconds and added to the current time; if it is
                       a datetime.timedelta object, it will also be
                       added to the current time. A datetime.datetime
                       object will be interpreted as the absolute
                       expiration time. If None is specified, the
                       default expiration time set by
                       --default-reservation-expire will be used (this
                       value will be treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        reservations = self._driver.reserve(context, self._resources, deltas,
                                            expire=expire,
                                            project_id=project_id)
        # NOTE: the %(reservations)s placeholder is filled from
        # locals(), so the local variable name above is significant.
        LOG.debug(_("Created reservations %(reservations)s") % locals())
        return reservations
    def commit(self, context, reservations, project_id=None):
        """Commit reservations.
        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        try:
            self._driver.commit(context, reservations, project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue. The exception is
            # logged, however, because this is less than optimal.
            LOG.exception(_("Failed to commit reservations "
                            "%(reservations)s") % locals())
        LOG.debug(_("Committed reservations %(reservations)s") % locals())
    def rollback(self, context, reservations, project_id=None):
        """Roll back reservations.
        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """
        try:
            self._driver.rollback(context, reservations, project_id=project_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue. The exception is
            # logged, however, because this is less than optimal.
            LOG.exception(_("Failed to roll back reservations "
                            "%(reservations)s") % locals())
        LOG.debug(_("Rolled back reservations %(reservations)s") % locals())
    def usage_reset(self, context, resources):
        """
        Reset the usage records for a particular user on a list of
        resources. This will force that user's usage records to be
        refreshed the next time a reservation is made.
        Note: this does not affect the currently outstanding
        reservations the user has; those reservations must be
        committed or rolled back (or expired).
        :param context: The request context, for access checks.
        :param resources: A list of the resource names for which the
                          usage must be reset.
        """
        self._driver.usage_reset(context, resources)
    def destroy_all_by_project(self, context, project_id):
        """
        Destroy all quotas, usages, and reservations associated with a
        project.
        :param context: The request context, for access checks.
        :param project_id: The ID of the project being deleted.
        """
        self._driver.destroy_all_by_project(context, project_id)
    def expire(self, context):
        """Expire reservations.
        Explores all currently existing reservations and rolls back
        any that have expired.
        :param context: The request context, for access checks.
        """
        self._driver.expire(context)
    @property
    def resources(self):
        """Return the sorted list of registered resource names."""
        return sorted(self._resources.keys())
def _sync_instances(context, project_id, session):
    """Recompute the instances/cores/ram usage counts for a project."""
    counts = db.instance_data_get_for_project(
        context, project_id, session=session)
    # The DB call returns counts in (instances, cores, ram) order.
    return dict(zip(('instances', 'cores', 'ram'), counts))
def _sync_floating_ips(context, project_id, session):
    """Recompute the floating-ip usage count for a project."""
    count = db.floating_ip_count_by_project(
        context, project_id, session=session)
    return {'floating_ips': count}
def _sync_security_groups(context, project_id, session):
    """Recompute the security-group usage count for a project."""
    count = db.security_group_count_by_project(
        context, project_id, session=session)
    return {'security_groups': count}
# Module-level quota engine singleton; built with the configured
# quota_driver and pre-populated with nova's standard resources below.
QUOTAS = QuotaEngine()
# Reservable resources carry a usage-sync callable; absolute resources
# are pure limits; countable resources use a custom counting function.
resources = [
    ReservableResource('instances', _sync_instances, 'quota_instances'),
    ReservableResource('cores', _sync_instances, 'quota_cores'),
    ReservableResource('ram', _sync_instances, 'quota_ram'),
    ReservableResource('floating_ips', _sync_floating_ips,
                       'quota_floating_ips'),
    AbsoluteResource('metadata_items', 'quota_metadata_items'),
    AbsoluteResource('injected_files', 'quota_injected_files'),
    AbsoluteResource('injected_file_content_bytes',
                     'quota_injected_file_content_bytes'),
    AbsoluteResource('injected_file_path_bytes',
                     'quota_injected_file_path_bytes'),
    ReservableResource('security_groups', _sync_security_groups,
                       'quota_security_groups'),
    CountableResource('security_group_rules',
                      db.security_group_rule_count_by_group,
                      'quota_security_group_rules'),
    CountableResource('key_pairs', db.key_pair_count_by_user,
                      'quota_key_pairs'),
    ]
QUOTAS.register_resources(resources)
| |
"""
Tests for array coercion, mainly through testing `np.array` results directly.
Note that other such tests exist e.g. in `test_api.py` and many corner-cases
are tested (sometimes indirectly) elsewhere.
"""
import pytest
from pytest import param
from itertools import product
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_umath import _discover_array_parameters
from numpy.testing import (
assert_array_equal, assert_warns, IS_PYPY)
def arraylikes():
    """
    Generator yielding callables that wrap an ndarray into various
    array-likes: the array itself, an ndarray subclass, an `__array__`
    implementor, a memoryview, and `__array_interface__` /
    `__array_struct__` implementors.  Most are wrapped in `pytest.param`
    to give them readable test ids.
    """
    # base array:
    def ndarray(a):
        return a
    yield param(ndarray, id="ndarray")
    # subclass:
    class MyArr(np.ndarray):
        pass
    def subclass(a):
        return a.view(MyArr)
    yield subclass
    class _SequenceLike():
        # We are giving a warning that array-like's were also expected to be
        # sequence-like in `np.array([array_like])`, this can be removed
        # when the deprecation expired (started NumPy 1.20)
        def __len__(self):
            raise TypeError
        def __getitem__(self):
            raise TypeError
    # `__array__` protocol:
    class ArrayDunder(_SequenceLike):
        def __init__(self, a):
            self.a = a
        def __array__(self, dtype=None):
            return self.a
    yield param(ArrayDunder, id="__array__")
    # memory-view
    yield param(memoryview, id="memoryview")
    # `__array_interface__` (dict-based) protocol:
    class ArrayInterface(_SequenceLike):
        def __init__(self, a):
            self.a = a  # need to hold on to keep interface valid
            self.__array_interface__ = a.__array_interface__
    yield param(ArrayInterface, id="__array_interface__")
    # `__array_struct__` (C-capsule) protocol:
    class ArrayStruct(_SequenceLike):
        def __init__(self, a):
            self.a = a  # need to hold on to keep struct valid
            self.__array_struct__ = a.__array_struct__
    yield param(ArrayStruct, id="__array_struct__")
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
    # Hard-coded list of scalar instances covering most NumPy scalar types,
    # each wrapped in `pytest.param` for a readable id.  The keyword flags
    # let callers exclude groups not every test can handle:
    #   times: datetime64/timedelta64 instances
    #   extended_precision: longdouble/clongdouble instances
    #   user_dtype: the `rational` test dtype
    # Floats:
    yield param(np.sqrt(np.float16(5)), id="float16")
    yield param(np.sqrt(np.float32(5)), id="float32")
    yield param(np.sqrt(np.float64(5)), id="float64")
    if extended_precision:
        yield param(np.sqrt(np.longdouble(5)), id="longdouble")
    # Complex:
    yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
    yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
    if extended_precision:
        yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
    # Bool:
    # XFAIL: Bool should be added, but has some bad properties when it
    # comes to strings, see also gh-9875
    # yield param(np.bool_(0), id="bool")
    # Integers:
    yield param(np.int8(2), id="int8")
    yield param(np.int16(2), id="int16")
    yield param(np.int32(2), id="int32")
    yield param(np.int64(2), id="int64")
    yield param(np.uint8(2), id="uint8")
    yield param(np.uint16(2), id="uint16")
    yield param(np.uint32(2), id="uint32")
    yield param(np.uint64(2), id="uint64")
    # Rational:
    if user_dtype:
        yield param(rational(1, 2), id="rational")
    # Cannot create a structured void scalar directly:
    structured = np.array([(1, 3)], "i,i")[0]
    assert isinstance(structured, np.void)
    assert structured.dtype == np.dtype("i,i")
    yield param(structured, id="structured")
    if times:
        # Datetimes and timedelta
        yield param(np.timedelta64(2), id="timedelta64[generic]")
        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
        yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
    # Strings and unstructured void:
    yield param(np.bytes_(b"1234"), id="bytes")
    yield param(np.unicode_("2345"), id="unicode")
    yield param(np.void(b"4321"), id="unstructured_void")
def is_parametric_dtype(dtype):
    """Return True if the dtype is a parametric legacy dtype: its itemsize
    is 0, or it is a datetime/timedelta with no unit attached.
    """
    if dtype.itemsize == 0:
        return True
    is_time_dtype = issubclass(dtype.type, (np.datetime64, np.timedelta64))
    # A unit-less (generic) time dtype is named exactly "datetime64" or
    # "timedelta64"; unit-ful ones look like "datetime64[ms]" and thus do
    # not end with "64".
    return is_time_dtype and dtype.name.endswith("64")
class TestStringDiscovery:
    """Discovery of the string length when coercing objects to "S" dtype:
    the resulting itemsize must match ``len(str(obj))`` exactly.
    """
    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_basic_stringlength(self, obj):
        # The discovered dtype is exactly long enough for str(obj).
        length = len(str(obj))
        expected = np.dtype(f"S{length}")
        assert np.array(obj, dtype="S").dtype == expected
        assert np.array([obj], dtype="S").dtype == expected
        # A nested array is also discovered correctly
        arr = np.array(obj, dtype="O")
        assert np.array(arr, dtype="S").dtype == expected
        # Check that .astype() behaves identical
        assert arr.astype("S").dtype == expected
    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_nested_arrays_stringlength(self, obj):
        # Same discovery applies through object arrays nested in a list.
        length = len(str(obj))
        expected = np.dtype(f"S{length}")
        arr = np.array(obj, dtype="O")
        assert np.array([arr, arr], dtype="S").dtype == expected
    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_unpack_first_level(self, arraylike):
        # We unpack exactly one level of array likes
        obj = np.array([None])
        obj[0] = np.array(1.2)
        # the length of the included item, not of the float dtype
        length = len(str(obj[0]))
        expected = np.dtype(f"S{length}")
        obj = arraylike(obj)
        # casting to string usually calls str(obj)
        arr = np.array([obj], dtype="S")
        assert arr.shape == (1, 1)
        assert arr.dtype == expected
class TestScalarDiscovery:
    """Coercion of scalars: dtype/shape discovery, promotion between scalar
    pairs, and consistency of ``np.array(scalar, dtype=...)`` with casting
    and item assignment.
    """
    def test_void_special_case(self):
        # Void dtypes with structures discover tuples as elements
        arr = np.array((1, 2, 3), dtype="i,i,i")
        assert arr.shape == ()
        arr = np.array([(1, 2, 3)], dtype="i,i,i")
        assert arr.shape == (1,)
    def test_char_special_case(self):
        # "c" dtype unpacks a string into an array of single characters.
        arr = np.array("string", dtype="c")
        assert arr.shape == (6,)
        assert arr.dtype.char == "c"
        arr = np.array(["string"], dtype="c")
        assert arr.shape == (1, 6)
        assert arr.dtype.char == "c"
    def test_char_special_case_deep(self):
        # Check that the character special case errors correctly if the
        # array is too deep:
        nested = ["string"]  # 2 dimensions (due to string being sequence)
        for i in range(np.MAXDIMS - 2):
            nested = [nested]
        arr = np.array(nested, dtype='c')
        assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
        with pytest.raises(ValueError):
            np.array([nested], dtype="c")
    def test_unknown_object(self):
        # Objects without any array protocol become 0-D object arrays.
        arr = np.array(object())
        assert arr.shape == ()
        assert arr.dtype == np.dtype("O")
    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar(self, scalar):
        # 0-D coercion and nested coercion preserve the scalar's dtype.
        arr = np.array(scalar)
        assert arr.shape == ()
        assert arr.dtype == scalar.dtype
        arr = np.array([[scalar, scalar]])
        assert arr.shape == (1, 2)
        assert arr.dtype == scalar.dtype
    # Additionally to string this test also runs into a corner case
    # with datetime promotion (the difference is the promotion order).
    @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
    def test_scalar_promotion(self):
        # np.array([sc1, sc2]).dtype should match np.promote_types.
        for sc1, sc2 in product(scalar_instances(), scalar_instances()):
            sc1, sc2 = sc1.values[0], sc2.values[0]
            # test all combinations:
            try:
                arr = np.array([sc1, sc2])
            except (TypeError, ValueError):
                # The promotion between two times can fail
                # XFAIL (ValueError): Some object casts are currently undefined
                continue
            assert arr.shape == (2,)
            try:
                dt1, dt2 = sc1.dtype, sc2.dtype
                expected_dtype = np.promote_types(dt1, dt2)
                assert arr.dtype == expected_dtype
            except TypeError as e:
                # Will currently always go to object dtype
                assert arr.dtype == np.dtype("O")
    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar_coercion(self, scalar):
        # This tests various scalar coercion paths, mainly for the numerical
        # types. It includes some paths not directly related to `np.array`
        if isinstance(scalar, np.inexact):
            # Ensure we have a full-precision number if available
            scalar = type(scalar)((scalar * 2)**0.5)
        if type(scalar) is rational:
            # Rational generally fails due to a missing cast. In the future
            # object casts should automatically be defined based on `setitem`.
            pytest.xfail("Rational to object cast is undefined currently.")
        # Use casting from object:
        arr = np.array(scalar, dtype=object).astype(scalar.dtype)
        # Test various ways to create an array containing this scalar:
        arr1 = np.array(scalar).reshape(1)
        arr2 = np.array([scalar])
        arr3 = np.empty(1, dtype=scalar.dtype)
        arr3[0] = scalar
        arr4 = np.empty(1, dtype=scalar.dtype)
        arr4[:] = [scalar]
        # All of these methods should yield the same results
        assert_array_equal(arr, arr1)
        assert_array_equal(arr, arr2)
        assert_array_equal(arr, arr3)
        assert_array_equal(arr, arr4)
    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
    @pytest.mark.parametrize("cast_to", scalar_instances())
    def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
        """
        Test that in most cases:
           * `np.array(scalar, dtype=dtype)`
           * `np.empty((), dtype=dtype)[()] = scalar`
           * `np.array(scalar).astype(dtype)`
        should behave the same. The only exceptions are parametric dtypes
        (mainly datetime/timedelta without unit) and void without fields.
        """
        dtype = cast_to.dtype  # use to parametrize only the target dtype
        for scalar in scalar_instances(times=False):
            scalar = scalar.values[0]
            if dtype.type == np.void:
                if scalar.dtype.fields is not None and dtype.fields is None:
                    # Here, coercion to "V6" works, but the cast fails.
                    # Since the types are identical, SETITEM takes care of
                    # this, but has different rules than the cast.
                    with pytest.raises(TypeError):
                        np.array(scalar).astype(dtype)
                    np.array(scalar, dtype=dtype)
                    np.array([scalar], dtype=dtype)
                    continue
            # The main test, we first try to use casting and if it succeeds
            # continue below testing that things are the same, otherwise
            # test that the alternative paths at least also fail.
            try:
                cast = np.array(scalar).astype(dtype)
            except (TypeError, ValueError, RuntimeError):
                # coercion should also raise (error type may change)
                with pytest.raises(Exception):
                    np.array(scalar, dtype=dtype)
                if (isinstance(scalar, rational) and
                        np.issubdtype(dtype, np.signedinteger)):
                    return
                with pytest.raises(Exception):
                    np.array([scalar], dtype=dtype)
                # assignment should also raise
                res = np.zeros((), dtype=dtype)
                with pytest.raises(Exception):
                    res[()] = scalar
                return
            # Non error path:
            arr = np.array(scalar, dtype=dtype)
            assert_array_equal(arr, cast)
            # assignment behaves the same
            ass = np.zeros((), dtype=dtype)
            ass[()] = scalar
            assert_array_equal(ass, cast)
    @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
    def test_default_dtype_instance(self, dtype_char):
        # Discovery on an empty input should pick the canonical instance
        # for each dtype character (with legacy sizes for S/U/V).
        if dtype_char in "SU":
            dtype = np.dtype(dtype_char + "1")
        elif dtype_char == "V":
            # Legacy behaviour was to use V8. The reason was float64 being the
            # default dtype and that having 8 bytes.
            dtype = np.dtype("V8")
        else:
            dtype = np.dtype(dtype_char)
        discovered_dtype, _ = _discover_array_parameters([], type(dtype))
        assert discovered_dtype == dtype
        assert discovered_dtype.itemsize == dtype.itemsize
    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
    def test_scalar_to_int_coerce_does_not_cast(self, dtype):
        """
        Signed integers are currently different in that they do not cast other
        NumPy scalar, but instead use scalar.__int__(). The hardcoded
        exception to this rule is `np.array(scalar, dtype=integer)`.
        """
        dtype = np.dtype(dtype)
        invalid_int = np.ulonglong(-1)
        float_nan = np.float64(np.nan)
        for scalar in [float_nan, invalid_int]:
            # This is a special case using casting logic and thus not failing:
            coerced = np.array(scalar, dtype=dtype)
            cast = np.array(scalar).astype(dtype)
            assert_array_equal(coerced, cast)
            # However these fail:
            with pytest.raises((ValueError, OverflowError)):
                np.array([scalar], dtype=dtype)
            with pytest.raises((ValueError, OverflowError)):
                cast[()] = scalar
class TestTimeScalars:
    """Coercion of datetime64/timedelta64 scalars into numeric and string
    dtypes, cross-checking ``np.array(scalar, dtype=...)``, explicit
    casting, and item assignment against one another.
    """
    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
             param(np.timedelta64(123, "s"), id="timedelta64[s]"),
             param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
             param(np.datetime64(1, "D"), id="datetime64[D]")],)
    def test_coercion_basic(self, dtype, scalar):
        # Note the `[scalar]` is there because np.array(scalar) uses stricter
        # `scalar.__int__()` rules for backward compatibility right now.
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        assert_array_equal(arr, cast)
        ass = np.ones((), dtype=dtype)
        if issubclass(dtype, np.integer):
            with pytest.raises(TypeError):
                # raises, as would np.array([scalar], dtype=dtype), this is
                # conversion from times, but behaviour of integers.
                ass[()] = scalar
        else:
            ass[()] = scalar
            assert_array_equal(ass, cast)
    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
             param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
    def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
        # Only "ns" and "generic" timedeltas can be converted to numbers
        # so these are slightly special: unlike in test_coercion_basic,
        # the assignment below succeeds even for integer dtypes.
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        ass = np.ones((), dtype=dtype)
        ass[()] = scalar  # does not raise (conversion to number is allowed)
        assert_array_equal(arr, cast)
        # Bug fix: compare the assignment result against the cast.  The
        # previous `assert_array_equal(cast, cast)` compared a value with
        # itself and asserted nothing.
        assert_array_equal(ass, cast)
    @pytest.mark.parametrize("dtype", ["S6", "U6"])
    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_datetime(self, val, unit, dtype):
        # String from datetime64 assignment is currently special cased to
        # never use casting. This is because casting will error in this
        # case, and traditionally in most cases the behaviour is maintained
        # like this. (`np.array(scalar, dtype="U6")` would have failed before)
        # TODO: This discrepancy _should_ be resolved, either by relaxing the
        #       cast, or by deprecating the first part.
        scalar = np.datetime64(val, unit)
        dtype = np.dtype(dtype)
        cut_string = dtype.type(str(scalar)[:6])
        arr = np.array(scalar, dtype=dtype)
        assert arr[()] == cut_string
        ass = np.ones((), dtype=dtype)
        ass[()] = scalar
        assert ass[()] == cut_string
        with pytest.raises(RuntimeError):
            # However, unlike the above assignment using `str(scalar)[:6]`
            # due to being handled by the string DType and not be casting
            # the explicit cast fails:
            np.array(scalar).astype(dtype)
    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_timedelta(self, val, unit):
        scalar = np.timedelta64(val, unit)
        # Unlike datetime64, timedelta allows the unsafe cast:
        np.array(scalar, dtype="S6")
        cast = np.array(scalar).astype("S6")
        ass = np.ones((), dtype="S6")
        ass[()] = scalar
        expected = scalar.astype("S")[:6]
        assert cast[()] == expected
        assert ass[()] == expected
class TestNested:
    """Shape/dtype discovery for deeply nested sequences and nested
    array-likes, including ragged and pathological (self-containing)
    inputs.
    """
    def test_nested_simple(self):
        # A list nested to exactly np.MAXDIMS coerces; one deeper fails.
        initial = [1.2]
        nested = initial
        for i in range(np.MAXDIMS - 1):
            nested = [nested]
        arr = np.array(nested, dtype="float64")
        assert arr.shape == (1,) * np.MAXDIMS
        with pytest.raises(ValueError):
            np.array([nested], dtype="float64")
        # We discover object automatically at this time:
        with assert_warns(np.VisibleDeprecationWarning):
            arr = np.array([nested])
        assert arr.dtype == np.dtype("O")
        assert arr.shape == (1,) * np.MAXDIMS
        assert arr.item() is initial
    def test_pathological_self_containing(self):
        # Test that this also works for two nested sequences
        l = []
        l.append(l)
        arr = np.array([l, l, l], dtype=object)
        assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
        # Also check a ragged case:
        arr = np.array([l, [None], l], dtype=object)
        assert arr.shape == (3, 1)
    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_nested_arraylikes(self, arraylike):
        # We try storing an array like into an array, but the array-like
        # will have too many dimensions. This means the shape discovery
        # decides that the array-like must be treated as an object (a special
        # case of ragged discovery). The result will be an array with one
        # dimension less than the maximum dimensions, and the array being
        # assigned to it (which does work for object or if `float(arraylike)`
        # works).
        initial = arraylike(np.ones((1, 1)))
        nested = initial
        for i in range(np.MAXDIMS - 1):
            nested = [nested]
        with pytest.warns(DeprecationWarning):
            # It will refuse to assign the array into
            np.array(nested, dtype="float64")
        # If this is object, we end up assigning a (1, 1) array into (1,)
        # (due to running out of dimensions), this is currently supported but
        # a special case which is not ideal.
        arr = np.array(nested, dtype=object)
        assert arr.shape == (1,) * np.MAXDIMS
        assert arr.item() == np.array(initial).item()
    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_uneven_depth_ragged(self, arraylike):
        arr = np.arange(4).reshape((2, 2))
        arr = arraylike(arr)
        # Array is ragged in the second dimension already:
        out = np.array([arr, [arr]], dtype=object)
        assert out.shape == (2,)
        assert out[0] is arr
        assert type(out[1]) is list
        # Array is ragged in the third dimension:
        with pytest.raises(ValueError):
            # This is a broadcast error during assignment, because
            # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
            np.array([arr, [arr, arr]], dtype=object)
    def test_empty_sequence(self):
        arr = np.array([[], [1], [[1]]], dtype=object)
        assert arr.shape == (3,)
        # The empty sequence stops further dimension discovery, so the
        # result shape will be (0,) which leads to an error during:
        with pytest.raises(ValueError):
            np.array([[], np.empty((0, 1))], dtype=object)
    def test_array_of_different_depths(self):
        # When multiple arrays (or array-likes) are included in a
        # sequences and have different depth, we currently discover
        # as many dimensions as they share. (see also gh-17224)
        arr = np.zeros((3, 2))
        mismatch_first_dim = np.zeros((1, 2))
        mismatch_second_dim = np.zeros((3, 3))
        dtype, shape = _discover_array_parameters(
            [arr, mismatch_second_dim], dtype=np.dtype("O"))
        assert shape == (2, 3)
        dtype, shape = _discover_array_parameters(
            [arr, mismatch_first_dim], dtype=np.dtype("O"))
        assert shape == (2,)
        # The second case is currently supported because the arrays
        # can be stored as objects:
        res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
        assert res[0] is arr
        assert res[1] is mismatch_first_dim
class TestBadSequences:
    # These are tests for bad objects passed into `np.array`, in general
    # these have undefined behaviour. In the old code they partially worked
    # when now they will fail. We could (and maybe should) create a copy
    # of all sequences to be safe against bad-actors.
    def test_growing_list(self):
        # List to coerce, `mylist` will append to it during coercion
        obj = []
        class mylist(list):
            def __len__(self):
                obj.append([1, 2])
                return super().__len__()
        obj.append(mylist([1, 2]))
        with pytest.raises(RuntimeError):
            np.array(obj)
    # Note: We do not test a shrinking list. These do very evil things
    #       and the only way to fix them would be to copy all sequences.
    #       (which may be a real option in the future).
    def test_mutated_list(self):
        # List to coerce, `mylist` will mutate the first element
        obj = []
        class mylist(list):
            def __len__(self):
                obj[0] = [2, 3]  # replace with a different list.
                return super().__len__()
        obj.append([2, 3])
        obj.append(mylist([1, 2]))
        with pytest.raises(RuntimeError):
            np.array(obj)
    def test_replace_0d_array(self):
        # List to coerce; `baditem` replaces the first element's first
        # item while reporting that it is not itself a sequence.
        obj = []
        class baditem:
            def __len__(self):
                obj[0][0] = 2  # replace cached 0-D array's content.
                raise ValueError("not actually a sequence!")
            def __getitem__(self):
                pass
        # Runs into a corner case in the new code, the `array(2)` is cached
        # so replacing it invalidates the cache.
        obj.append([np.array(2), baditem()])
        with pytest.raises(RuntimeError):
            np.array(obj)
class TestArrayLikes:
    """Coercion of objects implementing the array protocols, including
    0-D special cases, classes-as-objects, and fatal errors raised from
    protocol attributes.
    """
    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_0d_object_special_case(self, arraylike):
        arr = np.array(0.)
        obj = arraylike(arr)
        # A single array-like is always converted:
        res = np.array(obj, dtype=object)
        assert_array_equal(arr, res)
        # But a single 0-D nested array-like never:
        res = np.array([obj], dtype=object)
        assert res[0] is obj
    def test_0d_generic_special_case(self):
        class ArraySubclass(np.ndarray):
            def __float__(self):
                raise TypeError("e.g. quantities raise on this")
        arr = np.array(0.)
        obj = arr.view(ArraySubclass)
        res = np.array(obj)
        # The subclass is simply cast:
        assert_array_equal(arr, res)
        # If the 0-D array-like is included, __float__ is currently
        # guaranteed to be used. We may want to change that, quantities
        # and masked arrays half make use of this.
        with pytest.raises(TypeError):
            np.array([obj])
        # The same holds for memoryview:
        obj = memoryview(arr)
        res = np.array(obj)
        assert_array_equal(arr, res)
        with pytest.raises(ValueError):
            # The error type does not matter much here.
            np.array([obj])
    def test_arraylike_classes(self):
        # The classes of array-likes should generally be acceptable to be
        # stored inside a numpy (object) array. This tests all of the
        # special attributes (since all are checked during coercion).
        arr = np.array(np.int64)
        assert arr[()] is np.int64
        arr = np.array([np.int64])
        assert arr[0] is np.int64
        # This also works for properties/unbound methods:
        class ArrayLike:
            @property
            def __array_interface__(self):
                pass
            @property
            def __array_struct__(self):
                pass
            def __array__(self):
                pass
        arr = np.array(ArrayLike)
        assert arr[()] is ArrayLike
        arr = np.array([ArrayLike])
        assert arr[0] is ArrayLike
    @pytest.mark.skipif(
            np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
    def test_too_large_array_error_paths(self):
        """Test the error paths, including for memory leaks"""
        arr = np.array(0, dtype="uint8")
        # Guarantees that a contiguous copy won't work:
        arr = np.broadcast_to(arr, 2**62)
        for i in range(5):
            # repeat, to ensure caching cannot have an effect:
            with pytest.raises(MemoryError):
                np.array(arr)
            with pytest.raises(MemoryError):
                np.array([arr])
    @pytest.mark.parametrize("attribute",
        ["__array_interface__", "__array__", "__array_struct__"])
    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_attributes(self, attribute, error):
        # RecursionError and MemoryError are considered fatal. All errors
        # (except AttributeError) should probably be raised in the future,
        # but shapely made use of it, so it will require a deprecation.
        class BadInterface:
            def __getattr__(self, attr):
                if attr == attribute:
                    raise error
                super().__getattr__(attr)
        with pytest.raises(error):
            np.array(BadInterface())
    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_bad_length(self, error):
        # RecursionError and MemoryError are considered "critical" in
        # sequences. We could expand this more generally though. (NumPy 1.20)
        class BadSequence:
            def __len__(self):
                raise error
            def __getitem__(self):
                # must have getitem to be a Sequence
                return 1
        with pytest.raises(error):
            np.array(BadSequence())
| |
# coding: utf-8
""" Tests for infer_potential.py """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import logging
import shutil
import time
# Third-party
import astropy.units as u
from astropy.utils import isiterable
import h5py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import triangle
# Project
from streams.coordinates.frame import galactocentric, heliocentric
import streams.io as io
import streams.inference as si
from streams.inference.back_integrate import back_integration_likelihood
import streams.potential as sp
from streams.util import project_root
# Plain solid lines, no markers, for all plots produced by this module.
matplotlib.rc('lines', marker=None, linestyle='-')
# Create logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
# Minimal YAML config template; the *_params blocks below are spliced into
# the {..._params} placeholders via str.format.
minimum_config = """
name: test
data_file: data/observed_particles/2.5e8.hdf5
nparticles: 4
particle_idx: [3, 3667, 710, 1576]
potential:
    class_name: LawMajewski2010
{potential_params}
particles:
{particles_params}
satellite:
{satellite_params}
"""
pot_params = """
    parameters: [q1, qz, phi, v_halo]
"""
ptc_params = """
    parameters: [d]
"""
# parameters: [d, mul, mub, vr]
sat_params = """
    parameters: [alpha]
"""
# parameters: [logmass, logmdot, d, mul, mub, vr]
# _config = minimum_config.format(potential_params=pot_params,
#                                 particles_params=ptc_params,
#                                 satellite_params=sat_params)
# Default config used by the tests: potential + satellite parameters only,
# no per-particle parameters.
_config = minimum_config.format(potential_params=pot_params,
                                particles_params="",
                                satellite_params=sat_params)
# particles_params=ptc_params,
# satellite_params=sat_params
# Grid sizes for the coarse / zoomed 1-D likelihood scans.
Ncoarse = 21
Nfine = 71
# All diagnostic plots produced by these tests land under output_path.
output_path = os.path.join(project_root, 'plots', 'tests', 'infer_potential')
if not os.path.exists(output_path):
    os.mkdir(output_path)
def make_plot(model, idx, vals1, vals2):
    """Plot 1-D slices of the model posterior along parameter `idx`.

    Top row: ln-likelihood; bottom row: (normalized) likelihood.
    Left column scans the coarse grid `vals1`, right column the zoomed
    grid `vals2`.  A vertical line marks the true parameter value.
    Returns the matplotlib figure.
    """
    fig, axes = plt.subplots(2, 2, figsize=(12, 12), sharex='col')

    def _scan(values):
        # Evaluate the model along one parameter, all others at truth.
        p = model.truths.copy()
        lnls = []
        for v in values:
            p[idx] = v
            lnls.append(model(p))
        return lnls

    # Coarse scan (left column):
    Ls = _scan(vals1)
    axes[0, 0].plot(vals1, Ls)
    axes[0, 0].set_ylabel("$\ln\mathcal{L}$")
    axes[1, 0].plot(vals1, np.exp(Ls - np.max(Ls)))
    axes[1, 0].set_ylabel("$\mathcal{L}$")

    # Zoomed scan (right column):
    Ls = _scan(vals2)
    axes[0, 1].set_title("zoomed")
    axes[0, 1].plot(vals2, Ls)
    axes[1, 1].plot(vals2, np.exp(Ls - np.max(Ls)))

    for ax in axes.flat:
        ax.axvline(model.truths[idx])
    return fig
class TestStreamModel(object):
    """Integration tests for StreamModel: posterior evaluation, 1-D
    likelihood slices for every parameter, prior sampling, and
    per-particle likelihood diagnostics.  Most tests write diagnostic
    plots under `output_path` instead of asserting numeric results.
    """
    def setup(self):
        # Build a fresh model from the module-level test config per test.
        config = io.read_config(_config)
        self.model = si.StreamModel.from_config(config)
        self.model.sample_priors()
    def test_simple(self):
        # make sure true posterior value is higher than any randomly sampled value
        logger.debug("Checking posterior values...")
        true_ln_p = self.model.ln_posterior(self.model.truths, *self.model.lnpargs)
        true_ln_p2 = self.model(self.model.truths)
        logger.debug("\t\t At truth: {}".format(true_ln_p))
        p0 = self.model.sample_priors()
        ln_p = self.model.ln_posterior(p0, *self.model.lnpargs)
        ln_p2 = self.model(p0)
        logger.debug("\t\t At random sample: {}".format(ln_p))
        assert true_ln_p > ln_p
        # model.__call__ must agree with ln_posterior for the same input:
        assert true_ln_p == true_ln_p2
        assert ln_p == ln_p2
    def test_model(self):
        """ Simple test of posterior: save a coarse and a zoomed 1-D
        likelihood slice plot for every model parameter. """
        model = self.model
        test_path = os.path.join(output_path, "model")
        if not os.path.exists(test_path):
            os.mkdir(test_path)
        truth_dict = model._decompose_vector(model.truths)
        model.sample_priors()
        # idx tracks the parameter's flat position in the truths vector.
        idx = 0
        for group_name,group in truth_dict.items():
            for param_name,truths in group.items():
                print(group_name, param_name)
                param = model.parameters[group_name][param_name]
                if group_name == "potential":
                    # Coarse scan over the full prior range, zoom near truth.
                    vals1 = np.linspace(param._prior.a,
                                        param._prior.b,
                                        Ncoarse)
                    vals2 = np.linspace(0.9,1.1,Nfine)*truths
                    fig = make_plot(model, idx, vals1, vals2)
                    fig.savefig(os.path.join(test_path, "{}_{}.png".format(idx,param_name)))
                    plt.close('all')
                    idx += 1
                if group_name == "particles":
                    if param_name in heliocentric.coord_names:
                        # Gaussian-priored coordinates: scan +/- 10 and 3 sigma.
                        for jj in range(param.value.shape[0]):
                            prior = model._prior_cache[("particles",param_name)]
                            truth = truths[jj]
                            mu,sigma = truth,prior.sigma[jj]
                            vals1 = np.linspace(mu-10*sigma,
                                                mu+10*sigma,
                                                Ncoarse)
                            vals2 = np.linspace(mu-3*sigma,
                                                mu+3*sigma,
                                                Nfine)
                            fig = make_plot(model, idx, vals1, vals2)
                            fig.savefig(os.path.join(test_path,
                                        "ptcl{}_{}.png".format(idx,param_name)))
                            plt.close('all')
                            idx += 1
                    elif param_name == 'p_shocked':
                        # Uniform-priored per-particle parameter.
                        for jj in range(param.value.shape[0]):
                            vals1 = np.linspace(param._prior.a[jj],
                                                param._prior.b[jj],
                                                Ncoarse)
                            vals2 = np.linspace(param._prior.a[jj],
                                                param._prior.b[jj],
                                                Nfine)
                            fig = make_plot(model, idx, vals1, vals2)
                            fig.savefig(os.path.join(test_path,
                                        "ptcl{}_{}.png".format(idx,param_name)))
                            plt.close('all')
                            idx += 1
                    elif param_name == 'beta':
                        for jj in range(param.value.shape[0]):
                            vals1 = np.linspace(param._prior.a[jj],
                                                param._prior.b[jj],
                                                Ncoarse)
                            vals2 = np.linspace(param._prior.a[jj],
                                                param._prior.b[jj],
                                                Nfine)
                            fig = make_plot(model, idx, vals1, vals2)
                            fig.savefig(os.path.join(test_path,
                                        "ptcl{}_{}.png".format(idx,param_name)))
                            plt.close('all')
                            idx += 1
                    elif param_name == 'tub':
                        for jj in range(param.value.shape[0]):
                            vals1 = np.linspace(param._prior.a[jj],
                                                param._prior.b[jj],
                                                Ncoarse)
                            vals2 = np.linspace(0.9,1.1,Nfine)*truths[jj]
                            fig = make_plot(model, idx, vals1, vals2)
                            fig.savefig(os.path.join(test_path,
                                        "ptcl{}_{}.png".format(idx,param_name)))
                            plt.close('all')
                            idx += 1
                if group_name == "satellite":
                    if param_name in heliocentric.coord_names:
                        # NOTE(review): satellite coords are scalar (no [jj]
                        # indexing on truths/sigma), unlike the particles
                        # branch above -- confirm intended.
                        for jj in range(param.value.shape[0]):
                            prior = model._prior_cache[("satellite",param_name)]
                            truth = truths
                            mu,sigma = truth,prior.sigma
                            vals1 = np.linspace(mu-10*sigma,
                                                mu+10*sigma,
                                                Ncoarse)
                            vals2 = np.linspace(mu-3*sigma,
                                                mu+3*sigma,
                                                Nfine)
                            fig = make_plot(model, idx, vals1, vals2)
                            fig.savefig(os.path.join(test_path,
                                        "sat{}_{}.png".format(idx,param_name)))
                            plt.close('all')
                            idx += 1
                    elif param_name == "logmass":
                        vals1 = np.linspace(param._prior.a,
                                            param._prior.b,
                                            Ncoarse)
                        vals2 = np.linspace(0.9,1.1,Nfine)*truths
                        fig = make_plot(model, idx, vals1, vals2)
                        fig.savefig(os.path.join(test_path, "sat{}_{}.png".format(idx,param_name)))
                        plt.close('all')
                        idx += 1
                    elif param_name == "logmdot":
                        vals1 = np.linspace(param._prior.a,
                                            param._prior.b,
                                            Ncoarse)
                        vals2 = np.linspace(0.9,1.1,Nfine)*truths
                        fig = make_plot(model, idx, vals1, vals2)
                        fig.savefig(os.path.join(test_path, "sat{}_{}.png".format(idx,param_name)))
                        plt.close('all')
                        idx += 1
                    elif param_name == "alpha":
                        vals1 = np.linspace(0.5, 3.5, Ncoarse)
                        vals2 = np.linspace(0.9, 1.1, Nfine)*truths
                        fig = make_plot(model, idx, vals1, vals2)
                        fig.savefig(os.path.join(test_path, "sat{}_{}.png".format(idx,param_name)))
                        plt.close('all')
                        idx += 1
    def test_sample_priors(self):
        # Histogram plain prior samples against samples started from truth.
        test_path = os.path.join(output_path, "model", "priors")
        if not os.path.exists(test_path):
            os.mkdir(test_path)
        p = self.model.sample_priors(size=100).T
        #ptruths = self.model.start_truths(size=100).T
        ptruths = self.model.sample_priors(size=100, start_truth=True).T
        plt.figure(figsize=(5,5))
        for ii,(vals,truevals) in enumerate(zip(p,ptruths)):
            n,bins,pat = plt.hist(vals, bins=25, alpha=0.5)
            plt.hist(truevals, bins=25, alpha=0.5)
            plt.savefig(os.path.join(test_path, "{}.png".format(ii)))
            plt.clf()
        return
    def test_per_particle(self):
        # Plot each particle's back-integration likelihood as one potential
        # parameter (and then alpha) is varied around its truth.
        _c = minimum_config.format(potential_params=pot_params,
                                   particles_params="",
                                   satellite_params=sat_params)
        config = io.read_config(_c)
        model = si.StreamModel.from_config(config)
        model.sample_priors()
        test_path = os.path.join(output_path, "model")
        if not os.path.exists(test_path):
            os.mkdir(test_path)
        # likelihood args
        t1, t2, dt = model.lnpargs
        p_gc = model.true_particles.to_frame(galactocentric)._X
        s_gc = model.true_satellite.to_frame(galactocentric)._X
        logmass = model.satellite.logmass.truth
        logmdot = model.satellite.logmdot.truth
        #true_alpha = model.satellite.alpha.truth
        true_alpha = 1.4
        beta = model.particles.beta.truth
        tub = model.particles.tub.truth
        truth_dict = model._decompose_vector(model.truths)
        group = truth_dict['potential']
        for param_name,truths in group.items():
            print(param_name)
            param = model.parameters['potential'][param_name]
            vals = np.linspace(0.9,1.1,Nfine)*truths
            pparams = dict()
            Ls = []
            for val in vals:
                pparams[param_name] = val
                potential = model._potential_class(**pparams)
                ln_like = back_integration_likelihood(t1, t2, dt,
                                                      potential, p_gc, s_gc,
                                                      logmass, logmdot,
                                                      beta, true_alpha, tub)
                Ls.append(ln_like)
            # One curve per particle (transpose so rows are particles).
            Ls = np.array(Ls).T
            fig,ax = plt.subplots(1,1,figsize=(8,8))
            for ii,Lvec in enumerate(Ls):
                ax.plot(vals,Lvec,marker=None,linestyle='-',
                        label=str(ii), alpha=0.5)
            if param_name == "v_halo":
                ax.set_ylim(-300,50)
            ax.axvline(truths)
            ax.legend(loc='lower right', fontsize=14)
            fig.savefig(os.path.join(test_path, "per_particle_{}.png".format(param_name)))
        #########################
        # alpha
        param = model.parameters['satellite']['alpha']
        vals = np.linspace(0.5,2.5,Nfine)
        potential = model._potential_class()
        Ls = []
        for val in vals:
            ln_like = back_integration_likelihood(t1, t2, dt,
                                                  potential, p_gc, s_gc,
                                                  logmass, logmdot,
                                                  beta, val, tub)
            Ls.append(ln_like)
        Ls = np.array(Ls).T
        fig,ax = plt.subplots(1,1,figsize=(8,8))
        for ii,Lvec in enumerate(Ls):
            ax.plot(vals,Lvec,marker=None,linestyle='-',
                    label=str(ii), alpha=0.5)
        ax.axvline(true_alpha)
        ax.legend(loc='lower right', fontsize=14)
        fig.savefig(os.path.join(test_path, "per_particle_alpha.png"))
        plt.close('all')
    def test_coordinate_constraints(self):
        """ Want to test that having a missing dimension, other coordinates
            place constraints on the missing one.
        """
        test_path = os.path.join(output_path, "model", "coords")
        if not os.path.exists(test_path):
            os.mkdir(test_path)
        ptc_params = """
    parameters: [l,b,d,mul,mub,vr]
    missing_dims: [l,b,d,mul,mub,vr]
"""
        sat_params = """
    parameters: [l,b,d,mul,mub,vr]
    missing_dims: [l,b,d,mul,mub,vr]
"""
        _config = minimum_config.format(potential_params="",
                                        particles_params=ptc_params,
                                        satellite_params=sat_params)
        config = io.read_config(_config)
        model = si.StreamModel.from_config(config)
        model.sample_priors()
        # Scan the third-from-last parameter (a proper motion component).
        ix = -3
        truth = model.truths[ix]
        vals = np.linspace(-0.02, 0., Nfine)
        #vals = np.linspace(-0.012,-0.003,Nfine)
        Ls = []
        for val in vals:
            p = model.truths.copy()
            p[ix] = val
            Ls.append(model(p))
        Ls = np.array(Ls)
        fig,ax = plt.subplots(1,1,figsize=(8,8))
        ax.plot(vals,Ls,marker=None,linestyle='-')
        ax.axvline(truth)
        fig.savefig(os.path.join(test_path, "{}.png".format("mul")))
        fig,ax = plt.subplots(1,1,figsize=(8,8))
        ax.plot(vals,np.exp(Ls-np.max(Ls)),marker=None,linestyle='-')
        ax.axvline(truth)
        fig.savefig(os.path.join(test_path, "{}_exp.png".format("mul")))
if __name__ == "__main__":
    # Profile the likelihood and posterior evaluation for the LM10 model.
    import cProfile
    import pstats

    c = io.read_config(lm10_c)
    model = si.StreamModel.from_config(c)
    potential = model._potential_class(**model._given_potential_params)

    # NOTE: cProfile.run() evaluates its command string in the __main__
    # namespace, so `model` and `potential` must remain module-level names.
    cProfile.run('time_likelihood(model, potential)', 'likelihood_stats')
    p = pstats.Stats('likelihood_stats')
    p.strip_dirs().sort_stats('cumulative').print_stats(25)

    cProfile.run('time_posterior(model)', 'posterior_stats')
    p = pstats.Stats('posterior_stats')
    p.strip_dirs().sort_stats('cumulative').print_stats(25)
| |
#!/usr/bin/env python2.7
# Copyright 2013 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import smtplib
import argparse
import sys, os, traceback, time
import logging
import shutil, glob
import itertools
from functools import wraps
from version import __version__
from PIL import Image
import yaml
import multiprocessing
# Replace the Popen routine to allow win32 pyinstaller to build
from multiprocessing import forking
from pypdfocr_multiprocessing import _Popen
forking.Popen = _Popen
from pypdfocr_pdf import PyPdf
from pypdfocr_tesseract import PyTesseract
from pypdfocr_gs import PyGs
from pypdfocr_watcher import PyPdfWatcher
from pypdfocr_pdffiler import PyPdfFiler
from pypdfocr_filer_dirs import PyFilerDirs
from pypdfocr_filer_evernote import PyFilerEvernote
from pypdfocr_preprocess import PyPreprocess
def error(text):
    """Print an error message to stdout and abort with exit status -1."""
    message = "ERROR: %s" % text
    print(message)
    sys.exit(-1)
# decorator to retry multiple times
def retry(count=5, exc_type=Exception):
    """Decorator factory: retry the wrapped callable up to ``count`` times.

    Each attempt that raises ``exc_type`` is retried; the first successful
    return value is propagated.  If all ``count`` attempts fail, the
    exception from the last attempt is re-raised.

    :param count: maximum number of attempts (must be >= 1)
    :param exc_type: exception class (or tuple of classes) that triggers a retry
    """
    def decorator(func):
        @wraps(func)
        def result(*args, **kwargs):
            last_exc = None
            for _ in range(count):
                try:
                    return func(*args, **kwargs)
                except exc_type as exc:
                    # Remember the failure.  The original used a bare
                    # `raise` after the loop, which only works on Python 2:
                    # Python 3 clears the exception context when the
                    # handler exits, so a bare raise there is a
                    # RuntimeError("No active exception to re-raise").
                    last_exc = exc
            raise last_exc
        return result
    return decorator
@retry(count=6, exc_type=IOError)
def open_file_with_timeout(parser, arg):
    """Open ``arg`` for reading, retrying on IOError (e.g. a file that a
    scanner is still writing).  ``parser`` is accepted for argparse
    type-callback compatibility but is not used.
    """
    return open(arg, 'r')
"""
Make scanned PDFs searchable using Tesseract-OCR and autofile them
.. automodule:: pypdfocr
:private-members:
"""
class PyPDFOCR(object):
    """
    The main class. Performs the following functions:

    * Parses command line options
    * Optionally just watches a directory for new PDF's to OCR; once a file appears, it does the next step
    * Runs a single file conversion:
        * Runs ghostscript to get tiff/jpg
        * Runs Tesseract-OCR to do the actual OCR
        * Takes the HOCR from Tesseract and creates a new PDF with the text overlay
    * Files the OCR'ed file in the proper place if specified
    * Files the original file if specified
    """

    def __init__(self):
        """ Start with an empty config; the GhostScript, Tesseract and PDF
            helper objects are created later by :func:`_setup_external_tools`.
        """
        self.config = {}

    def _get_config_file(self, config_file):
        """
        Read in the yaml config file

        :param config_file: Configuration file (YAML format)
        :type config_file: file
        :returns: dict of yaml file
        :rtype: dict
        """
        with config_file:
            # safe_load only constructs plain Python objects; yaml.load on a
            # user-supplied file can instantiate arbitrary objects and is
            # deprecated without an explicit Loader.
            myconfig = yaml.safe_load(config_file)
        return myconfig

    def get_options(self, argv):
        """
        Parse the command-line options and set the following object properties:

        :param argv: usually just sys.argv[1:]
        :returns: Nothing

        :ivar debug: Enable logging debug statements
        :ivar verbose: Enable verbose logging
        :ivar enable_filing: Whether to enable post-OCR filing of PDFs
        :ivar pdf_filename: Filename for single conversion mode
        :ivar watch_dir: Directory to watch for files to convert
        :ivar config: Dict of the config file
        :ivar watch: Whether folder watching mode is turned on
        :ivar enable_evernote: Enable filing to evernote
        """
        p = argparse.ArgumentParser(
                description = "Convert scanned PDFs into their OCR equivalent. Depends on GhostScript and Tesseract-OCR being installed.",
                epilog = "PyPDFOCR version %s (Copyright 2013 Virantha Ekanayake)" % __version__,
                )

        p.add_argument('-d', '--debug', action='store_true',
            default=False, dest='debug', help='Turn on debugging')

        p.add_argument('-v', '--verbose', action='store_true',
            default=False, dest='verbose', help='Turn on verbose mode')

        p.add_argument('-m', '--mail', action='store_true',
            default=False, dest='mail', help='Send email after conversion')

        p.add_argument('-l', '--lang',
            default='eng', dest='lang', help='Language(default eng)')

        p.add_argument('--preprocess', action='store_true',
            default=False, dest='preprocess', help='Enable preprocessing. Not really useful now with improved Tesseract 3.04+')

        p.add_argument('--skip-preprocess', action='store_true',
            default=False, dest='skip_preprocess', help='DEPRECATED: always skips now.')

        #---------
        # Single or watch mode
        #--------
        single_or_watch_group = p.add_mutually_exclusive_group(required=True)
        # Positional argument for single file conversion
        single_or_watch_group.add_argument("pdf_filename", nargs="?", help="Scanned pdf file to OCR")
        # Watch directory for watch mode
        single_or_watch_group.add_argument('-w', '--watch',
            dest='watch_dir', help='Watch given directory and run ocr automatically until terminated')

        #-----------
        # Filing options
        #----------
        filing_group = p.add_argument_group(title="Filing options")
        filing_group.add_argument('-f', '--file', action='store_true',
            default=False, dest='enable_filing', help='Enable filing of converted PDFs')
        # Use the retrying opener so a config file still being written
        # (e.g. by a sync client) does not abort startup immediately.
        filing_group.add_argument('-c', '--config', type = lambda x: open_file_with_timeout(p, x),
            dest='configfile', help='Configuration file for defaults and PDF filing')
        filing_group.add_argument('-e', '--evernote', action='store_true',
            default=False, dest='enable_evernote', help='Enable filing to Evernote')
        filing_group.add_argument('-n', action='store_true',
            default=False, dest='match_using_filename', help='Use filename to match if contents did not match anything, before filing to default folder')

        args = p.parse_args(argv)

        self.debug = args.debug
        self.verbose = args.verbose
        self.pdf_filename = args.pdf_filename
        self.lang = args.lang
        self.watch_dir = args.watch_dir
        self.enable_email = args.mail
        self.match_using_filename = args.match_using_filename

        # Preprocessing is skipped by default (Tesseract >= 3.04 handles
        # non-ideal inputs well); --preprocess opts back in.
        if args.skip_preprocess:
            print("Warning: --skip_preprocess is not needed anymore (defaults to skipping preprocessing). If you want to enable preprocessing, use the new --preprocess option")
        self.skip_preprocess = True
        if args.preprocess:
            self.skip_preprocess = False

        if self.debug:
            logging.basicConfig(level=logging.DEBUG, format='%(message)s')

        if self.verbose:
            logging.basicConfig(level=logging.INFO, format='%(message)s')

        # Parse configuration file (YAML) if specified
        if args.configfile:
            self.config = self._get_config_file(args.configfile)
            logging.debug("Read in configuration file")
            logging.debug(self.config)

        self.enable_evernote = args.enable_evernote

        if args.enable_filing or args.enable_evernote:
            self.enable_filing = True
            if not args.configfile:
                p.error("Please specify a configuration file(CONFIGFILE) to enable filing")
        else:
            self.enable_filing = False

        self.watch = False
        if args.watch_dir:
            logging.debug("Starting to watch")
            self.watch = True

        if self.enable_email:
            if not args.configfile:
                p.error("Please specify a configuration file(CONFIGFILE) to enable email")

    def _clean_up_files(self, files):
        """
        Helper function to delete files

        :param files: List of files to delete
        :type files: list
        :returns: None
        """
        for f in files:
            try:
                os.remove(f)
            except OSError:
                # Best-effort cleanup: a missing or locked file is not fatal.
                logging.debug("Error removing file %s .... continuing" % f)

    def _setup_filing(self):
        """
        Instance the proper PyFiler object (either
        :class:`pypdfocr.pypdfocr_filer_dirs.PyFilerDirs` or
        :class:`pypdfocr.pypdfocr_filer_evernote.PyFilerEvernote`)

        TODO: Make this more generic to allow third-party plugin filing objects

        :ivar filer: :class:`pypdfocr.pypdfocr_filer.PyFiler` PyFiler subclass object that is instantiated
        :ivar pdf_filer: :class:`pypdfocr.pypdfocr_pdffiler.PyPdfFiler` object to help with PDF reading
        :returns: Nothing
        """
        # Look at self.config and create a self.pdf_filer object
        # --------------------------------------------------
        # Some sanity checks
        # --------------------------------------------------
        assert(self.config and self.enable_filing)
        for required in ['target_folder', 'default_folder']:
            if required not in self.config:
                error("%s must be specified in config file" % required)
            else:
                # Make sure these required folders are in abspath format
                self.config[required] = os.path.abspath(self.config[required])

        if 'original_move_folder' in self.config:
            # User wants to move the original after filing
            orig = 'original_move_folder'
            self.config[orig] = os.path.abspath(self.config[orig])
            if not os.path.exists(self.config[orig]):
                os.makedirs(self.config[orig])
            original_move_folder = self.config[orig]
        else:
            original_move_folder = None

        # --------------------------------------------------
        # Start the filing object
        # --------------------------------------------------
        if self.enable_evernote:
            self.filer = PyFilerEvernote(self.config['evernote_developer_token'])
        else:
            self.filer = PyFilerDirs()

        self.filer.target_folder = self.config['target_folder']
        self.filer.default_folder = self.config['default_folder']
        self.filer.original_move_folder = original_move_folder

        self.pdf_filer = PyPdfFiler(self.filer)
        if self.match_using_filename:
            print("Matching using filename as a fallback to pdf contents")
            self.pdf_filer.file_using_filename = True

        # ------------------------------
        # Add all the folder names with associated keywords
        # to the filer object
        # ------------------------------
        keyword_count = 0
        folder_count = 0
        if 'folders' in self.config:
            for folder, keywords in self.config['folders'].items():
                folder_count += 1
                keyword_count += len(keywords)
                # Make sure keywords are lower-cased before adding
                keywords = [str(x).lower() for x in keywords]
                self.filer.add_folder_target(folder, keywords)

        print("Filing of PDFs is enabled")
        print(" - %d target filing folders" % (folder_count))
        print(" - %d keywords" % (keyword_count))

    def _setup_external_tools(self):
        """
        Instantiate the external tool wrappers with their config dicts
        """
        self.gs = PyGs(self.config.get('ghostscript', {}))
        self.ts = PyTesseract(self.config.get('tesseract', {}))
        self.pdf = PyPdf(self.gs)
        self.preprocess = PyPreprocess(self.config.get('preprocess', {}))

    def run_conversion(self, pdf_filename):
        """
        Does the following:

        - Convert the PDF using GhostScript to TIFF and JPG
        - Run Tesseract on the TIFF to extract the text into HOCR (html)
        - Use PDF generator to overlay the text on the JPG and output a new PDF
        - Clean up temporary image files

        :param pdf_filename: Scanned PDF
        :type pdf_filename: string
        :returns: OCR'ed PDF
        :rtype: filename string
        """
        print("Starting conversion of %s" % pdf_filename)
        # Make the images for Tesseract
        img_dpi, glob_img_filename = self.gs.make_img_from_pdf(pdf_filename)
        fns = glob.glob(glob_img_filename)
        try:
            # Preprocess
            if not self.skip_preprocess:
                preprocess_imagefilenames = self.preprocess.preprocess(fns)
            else:
                logging.info("Skipping preprocess step")
                preprocess_imagefilenames = fns
            # Run tesseract
            self.ts.lang = self.lang
            hocr_filenames = self.ts.make_hocr_from_pnms(preprocess_imagefilenames)
            # Generate new pdf with overlayed text
            ocr_pdf_filename = self.pdf.overlay_hocr_pages(img_dpi, hocr_filenames, pdf_filename)
        finally:
            # Clean up the files
            time.sleep(1)
            if not self.debug:
                # These locals may be unbound if an exception was raised
                # before they were assigned, so check membership in locals()
                # (dict.has_key() was removed in Python 3).
                if "fns" in locals():
                    logging.info("Cleaning up %s" % fns)
                    self._clean_up_files(fns)
                if "preprocess_imagefilenames" in locals():
                    logging.info("Cleaning up %s" % preprocess_imagefilenames)
                    self._clean_up_files(preprocess_imagefilenames)
                    # Newer tesseract (> 3.03) also emits .txt files next to
                    # the .hocr/.html output, so sweep all three extensions.
                    for ext in [".hocr", ".html", ".txt"]:
                        fns_to_remove = [os.path.splitext(fn)[0] + ext for fn in preprocess_imagefilenames]
                        logging.info("Cleaning up %s" % fns_to_remove)
                        self._clean_up_files(fns_to_remove)

        print("Completed conversion successfully to %s" % ocr_pdf_filename)
        return ocr_pdf_filename

    def file_converted_file(self, ocr_pdffilename, original_pdffilename):
        """ Move the converted filename to its destination directory.  Optionally also
            moves the original PDF.

            :param ocr_pdffilename: Converted PDF file
            :type ocr_pdffilename: filename string
            :param original_pdffilename: Original scanned PDF file
            :type original_pdffilename: filename string
            :returns: Target folder name
            :rtype: string
        """
        filed_path = self.pdf_filer.move_to_matching_folder(ocr_pdffilename)
        print("Filed %s to %s as %s" % (ocr_pdffilename, os.path.dirname(filed_path), os.path.basename(filed_path)))

        tgt_path = self.pdf_filer.file_original(original_pdffilename)
        if tgt_path != original_pdffilename:
            print("Filed original file %s to %s as %s" % (original_pdffilename, os.path.dirname(tgt_path), os.path.basename(tgt_path)))
        return os.path.dirname(filed_path)

    def _send_email(self, infilename, outfilename, filing):
        """
        Send a conversion-status email using smtp.  Server, credentials and
        addresses all come from the YAML config file.
        """
        print("Sending email status")
        from_addr = self.config["mail_from_addr"]
        to_addr_list = self.config["mail_to_list"]
        smtpserver = self.config["mail_smtp_server"]
        login = self.config["mail_smtp_login"]
        password = self.config["mail_smtp_password"]

        subject = "PyPDFOCR converted: %s" % (os.path.basename(outfilename))
        header = 'From: %s\n' % login
        header += 'To: %s\n' % ','.join(to_addr_list)
        header += 'Subject: %s\n\n' % subject
        message = """
        PyPDFOCR Conversion:
        --------------------
        Original file: %s
        Converted file: %s
        Filing: %s
        """ % (infilename, outfilename, filing)
        message = header + message

        server = smtplib.SMTP(smtpserver)
        server.starttls()
        server.login(login, password)
        server.sendmail(from_addr, to_addr_list, message)
        server.quit()

    def go(self, argv):
        """
        The main entry point into PyPDFOCR

        #. Parses options
        #. If filing is enabled, call :func:`_setup_filing`
        #. If watch is enabled, start the watcher
        #. :func:`run_conversion`
        #. if filing is enabled, call :func:`file_converted_file`
        """
        # Read the command line options
        self.get_options(argv)

        # Setup tesseract and ghostscript
        self._setup_external_tools()

        # Setup the pdf filing if enabled
        if self.enable_filing:
            self._setup_filing()

        # Do the actual conversion followed by optional filing and email
        if self.watch:
            while True:  # Make sure the watcher doesn't terminate
                try:
                    py_watcher = PyPdfWatcher(self.watch_dir, self.config.get('watch'))
                    for pdf_filename in py_watcher.start():
                        self._convert_and_file_email(pdf_filename)
                except KeyboardInterrupt:
                    break
                except Exception:
                    # Log the failure and restart the watcher on the next
                    # loop iteration.  (The original Python-2 `print
                    # traceback.print_exc(e)` was a Py3 SyntaxError and also
                    # passed the exception as print_exc's `limit` argument.)
                    traceback.print_exc()
                    py_watcher.stop()
        else:
            self._convert_and_file_email(self.pdf_filename)

    def _convert_and_file_email(self, pdf_filename):
        """
        Helper function to run the conversion, then do the optional filing, and optional emailing.
        """
        ocr_pdffilename = self.run_conversion(pdf_filename)
        if self.enable_filing:
            filing = self.file_converted_file(ocr_pdffilename, pdf_filename)
        else:
            filing = "None"

        if self.enable_email:
            self._send_email(pdf_filename, ocr_pdffilename, filing)
def main():  # pragma: no cover
    """Console-script entry point."""
    # Required so a pyinstaller-frozen Windows executable can spawn workers.
    multiprocessing.freeze_support()
    PyPDFOCR().go(sys.argv[1:])


if __name__ == '__main__':
    main()
| |
# This test needs a pyb.CAN peripheral; on ports/boards without one the
# test harness is told to skip via the 'SKIP' sentinel.
try:
    from pyb import CAN
except ImportError:
    print('SKIP')
    raise SystemExit

from array import array
import micropython
import pyb
# NOTE: this is an expected-output test -- the exact sequence of printed
# lines is compared against a .exp file, so print order is the contract.

# test we can correctly create by id (2 handled in can2.py test)
for bus in (-1, 0, 1, 3):
    try:
        CAN(bus, CAN.LOOPBACK)
        print("CAN", bus)
    except ValueError:
        print("ValueError", bus)

CAN(1).deinit()
CAN.initfilterbanks(14)
can = CAN(1)
print(can)

# Test state when de-init'd
print(can.state() == can.STOPPED)

can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))

# Test state when freshly created
print(can.state() == can.ERROR_ACTIVE)

# Test that restart can be called
can.restart()

# Test info returns a sensible value
print(can.info())

# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))

can.send('abcd', 123, timeout=5000)
print(can.any(0), can.info())
print(can.recv(0))

# Out-of-range IDs below: behavior is whatever the port prints.
can.send('abcd', -1, timeout=5000)
print(can.recv(0))

can.send('abcd', 0x7FF + 1, timeout=5000)
print(can.recv(0))

# Test too long message (CAN payloads are at most 8 bytes)
try:
    can.send('abcdefghi', 0x7FF, timeout=5000)
except ValueError:
    print('passed')
else:
    print('failed')

# Test that recv can work without allocating memory on the heap
# (the 4-element list form of recv() writes into a caller-provided buffer).
buf = bytearray(10)
l = [0, 0, 0, memoryview(buf)]
l2 = None

micropython.heap_lock()

can.send('', 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)

can.send('1234', 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)

can.send('01234567', 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)

can.send('abc', 42)
l2 = can.recv(0, l)
assert l is l2
print(l, len(l[3]), buf)

micropython.heap_unlock()

# Test that recv can work with different arrays behind the memoryview
can.send('abc', 1)
print(bytes(can.recv(0, [0, 0, 0, memoryview(array('B', range(8)))])[3]))
can.send('def', 1)
print(bytes(can.recv(0, [0, 0, 0, memoryview(array('b', range(8)))])[3]))

# Test for non-list passed as second arg to recv
can.send('abc', 1)
try:
    can.recv(0, 1)
except TypeError:
    print('TypeError')

# Test for too-short-list passed as second arg to recv
can.send('abc', 1)
try:
    can.recv(0, [0, 0, 0])
except ValueError:
    print('ValueError')

# Test for non-memoryview passed as 4th element to recv
can.send('abc', 1)
try:
    can.recv(0, [0, 0, 0, 0])
except TypeError:
    print('TypeError')

# Test for read-only-memoryview passed as 4th element to recv
can.send('abc', 1)
try:
    can.recv(0, [0, 0, 0, memoryview(bytes(8))])
except ValueError:
    print('ValueError')

# Test for bad-typecode-memoryview passed as 4th element to recv
can.send('abc', 1)
try:
    can.recv(0, [0, 0, 0, memoryview(array('i', range(8)))])
except ValueError:
    print('ValueError')

del can

# Testing extended IDs
can = CAN(1, CAN.LOOPBACK, extframe = True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))

print(can)

# In extframe mode an ID above 0x7FF is legal (29-bit identifiers).
try:
    can.send('abcde', 0x7FF + 1, timeout=5000)
except ValueError:
    print('failed')
else:
    r = can.recv(0)
    if r[0] == 0x7FF+1 and r[3] == b'abcde':
        print('passed')
    else:
        print('failed, wrong data received')

# Test filters: the mask selects which ID bits must match filter_id.
for n in [0, 8, 16, 24]:
    filter_id = 0b00001000 << n
    filter_mask = 0b00011100 << n
    id_ok = 0b00001010 << n
    id_fail = 0b00011010 << n

    can.clearfilter(0)
    can.setfilter(0, pyb.CAN.MASK32, 0, (filter_id, filter_mask))

    can.send('ok', id_ok, timeout=3)
    if can.any(0):
        msg = can.recv(0)
        print((hex(filter_id), hex(filter_mask), hex(msg[0]), msg[3]))

    can.send("fail", id_fail, timeout=3)
    if can.any(0):
        msg = can.recv(0)
        print((hex(filter_id), hex(filter_mask), hex(msg[0]), msg[3]))

del can

# Test RxCallbacks
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))
def cb0(bus, reason):
    # FIFO 0 receive-IRQ callback: report which event fired.
    # Runs in interrupt context, so no heap allocation is done here.
    print('cb0')
    if reason == 0:
        print('pending')
    elif reason == 1:
        print('full')
    elif reason == 2:
        print('overflow')
def cb1(bus, reason):
    # FIFO 1 receive-IRQ callback: report which event fired.
    # Runs in interrupt context, so no heap allocation is done here.
    print('cb1')
    if reason == 0:
        print('pending')
    elif reason == 1:
        print('full')
    elif reason == 2:
        print('overflow')
def cb0a(bus, reason):
    # Replacement FIFO 0 callback (swapped in mid-test via rxcallback).
    # Runs in interrupt context, so no heap allocation is done here.
    print('cb0a')
    if reason == 0:
        print('pending')
    elif reason == 1:
        print('full')
    elif reason == 2:
        print('overflow')
def cb1a(bus, reason):
    # Replacement FIFO 1 callback (swapped in mid-test via rxcallback).
    # Runs in interrupt context, so no heap allocation is done here.
    print('cb1a')
    if reason == 0:
        print('pending')
    elif reason == 1:
        print('full')
    elif reason == 2:
        print('overflow')
# Exercise the FIFO callbacks: each FIFO holds 3 messages, so the 4th send
# triggers the 'full'/'overflow' reasons; swapping callbacks mid-stream
# checks that rxcallback replacement takes effect.
can.rxcallback(0, cb0)
can.rxcallback(1, cb1)

can.send('11111111',1, timeout=5000)
can.send('22222222',2, timeout=5000)
can.send('33333333',3, timeout=5000)
can.rxcallback(0, cb0a)
can.send('44444444',4, timeout=5000)

can.send('55555555',5, timeout=5000)
can.send('66666666',6, timeout=5000)
can.send('77777777',7, timeout=5000)
can.rxcallback(1, cb1a)
can.send('88888888',8, timeout=5000)

print(can.recv(0))
print(can.recv(0))
print(can.recv(0))
print(can.recv(1))
print(can.recv(1))
print(can.recv(1))

can.send('11111111',1, timeout=5000)
can.send('55555555',5, timeout=5000)

print(can.recv(0))
print(can.recv(1))

del can

# Testing asynchronous send
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))

# Drain any stale messages before the async test.
while can.any(0):
    can.recv(0)

can.send('abcde', 1, timeout=0)
print(can.any(0))
while not can.any(0):
    pass
print(can.recv(0))

# With timeout=0 the send is queued; the hardware has 3 TX mailboxes, so
# the 4th queued send fails with OSError errno 16 (EBUSY).
try:
    can.send('abcde', 2, timeout=0)
    can.send('abcde', 3, timeout=0)
    can.send('abcde', 4, timeout=0)
    can.send('abcde', 5, timeout=0)
except OSError as e:
    if str(e) == '16':
        print('passed')
    else:
        print('failed')

pyb.delay(500)

while can.any(0):
    print(can.recv(0))

# Testing rtr messages (remote transmission requests) and per-filter rtr flags
bus1 = CAN(1, CAN.LOOPBACK)
while bus1.any(0):
    bus1.recv(0)
bus1.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
bus1.setfilter(1, CAN.LIST16, 0, (5, 6, 7, 8), rtr=(True, True, True, True))
bus1.setfilter(2, CAN.MASK16, 0, (64, 64, 32, 32), rtr=(False, True))

bus1.send('',1,rtr=True)
print(bus1.any(0))
bus1.send('',5,rtr=True)
print(bus1.recv(0))
bus1.send('',6,rtr=True)
print(bus1.recv(0))
bus1.send('',7,rtr=True)
print(bus1.recv(0))
bus1.send('',16,rtr=True)
print(bus1.any(0))
bus1.send('',32,rtr=True)
print(bus1.recv(0))

# test HAL error, timeout: NORMAL mode with no bus partner means the send
# can never be acknowledged, so it times out.
can = pyb.CAN(1, pyb.CAN.NORMAL)
try:
    can.send('1', 1, timeout=50)
except OSError as e:
    print(repr(e))
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network with additional variables to support pruning.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.model_pruning.examples.cifar10 import cifar10_input
from tensorflow.contrib.model_pruning.python import pruning
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

BATCH_SIZE = 128  # Examples per mini-batch; also fixes the reshape in inference().
DATA_DIR = '/tmp/cifar10_data'  # Where the extracted binary batches live.

# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1       # Initial learning rate.

# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'

# Source archive for the CIFAR-10 binary dataset.
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
  """Helper to create summaries for activations.

  Creates a summary that provides a histogram of activations.
  Creates a summary that measures the sparsity of activations.

  Args:
    x: Tensor
  Returns:
    nothing
  """
  # Strip any 'tower_<i>/' prefix so summaries from different GPU towers
  # collapse onto a single name in TensorBoard.
  summary_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.summary.histogram(summary_name + '/activations', x)
  tf.summary.scalar(summary_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable
  Returns:
    Variable Tensor
  """
  # Pinning the variable to /cpu:0 lets multiple GPU towers share one copy.
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer,
                           dtype=tf.float32)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.
  Returns:
    Variable Tensor
  """
  dtype = tf.float32
  initializer = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
  var = _variable_on_cpu(name, shape, initializer)
  if wd is not None:
    # Register the L2 penalty in the 'losses' collection so loss() sums it
    # into the total training loss.
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
def distorted_inputs():
  """Construct distorted input for CIFAR training using the Reader ops.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If no data_dir
  """
  if not DATA_DIR:
    raise ValueError('Please supply a data_dir')
  batches_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
  # cifar10_input returns the (images, labels) pair directly.
  return cifar10_input.distorted_inputs(data_dir=batches_dir,
                                        batch_size=BATCH_SIZE)
def inputs(eval_data):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  Raises:
    ValueError: If no data_dir
  """
  if not DATA_DIR:
    raise ValueError('Please supply a data_dir')
  batches_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
  # cifar10_input returns the (images, labels) pair directly.
  return cifar10_input.inputs(eval_data=eval_data, data_dir=batches_dir,
                              batch_size=BATCH_SIZE)
def inference(images):
  """Build the CIFAR-10 model.

  Args:
    images: Images returned from distorted_inputs() or inputs().
      Per those functions' contracts this is a 4D tensor of shape
      [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3].
  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # While instantiating conv and local layers, we add mask and threshold
  # variables to the layer by calling the pruning.apply_mask() function.
  # Note that the masks are applied only to the weight tensors.
  #
  # NOTE: the variable-scope names below ('conv1', 'local3', ...) are part of
  # the checkpoint layout -- renaming them would break saved models.
  # conv1
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(
        images, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv1)

  # pool1
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1 (local response normalization)
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')

  # conv2
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(
        norm1, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv2)

  # norm2 -- note the pool/norm order is swapped relative to the first block.
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')

  # local3 (first fully-connected layer)
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [BATCH_SIZE, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(
        tf.matmul(reshape, pruning.apply_mask(weights, scope)) + biases,
        name=scope.name)
    _activation_summary(local3)

  # local4 (second fully-connected layer)
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(
        tf.matmul(local3, pruning.apply_mask(weights, scope)) + biases,
        name=scope.name)
    _activation_summary(local4)

  # linear layer(WX + b),
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(
        tf.matmul(local4, pruning.apply_mask(weights, scope)),
        biases,
        name=scope.name)
    _activation_summary(softmax_linear)

  return softmax_linear
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
      of shape [batch_size]
  Returns:
    Loss tensor of type float.
  """
  # Average cross-entropy over the batch; labels are sparse class indices.
  per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=tf.cast(labels, tf.int64), logits=logits,
      name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(per_example, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # Total loss = cross entropy + every weight-decay term registered in the
  # 'losses' collection by _variable_with_weight_decay().
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().

    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    # decay=0.9 smooths the noisy per-batch values for display.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each raw loss as '(raw)' and give the moving-average version
        # the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
def train(total_loss, global_step):
    """Train CIFAR-10 model.

    Create an optimizer and apply to all trainable variables. Add moving
    average for all trainable variables.

    Args:
        total_loss: Total loss from loss().
        global_step: Integer Variable counting the number of training steps
            processed.

    Returns:
        train_op: op for training.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / BATCH_SIZE
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                    global_step,
                                    decay_steps,
                                    LEARNING_RATE_DECAY_FACTOR,
                                    staircase=True)
    tf.summary.scalar('learning_rate', lr)
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)
    # Compute gradients only after the loss-averaging op has run.
    with tf.control_dependencies([loss_averages_op]):
        opt = tf.train.GradientDescentOptimizer(lr)
        grads = opt.compute_gradients(total_loss)
    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name, var)
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # The returned op does nothing itself; its control dependencies fire the
    # gradient update and the moving-average update.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
    return train_op
def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website.

    Downloads DATA_URL into DATA_DIR (creating the directory if needed),
    skipping the download when the archive is already present, then extracts
    the gzipped tarball into DATA_DIR.
    """
    dest_directory = DATA_DIR
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Simple carriage-return progress meter.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # Use a context manager so the tar file handle is closed even if
    # extraction fails (the original leaked the open TarFile object).
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dest_directory)
| |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2010 Barry Schwartz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import fontforge
import string
from sortsmill import font_db
from sortsmill.glyphbuild import *
from sortsmill.spacing_by_anchors import *
def build_glyphs(bitbucket, f):
    """Build all derived glyphs for font f: plain glyph references,
    caps-to-small-caps (.c2) references, accented letters, fractions,
    multigraphs, and finally anchor-based spacing and kerning.

    Args:
        bitbucket: Unused here; accepted for the builder's calling
            convention.  NOTE(review): confirm callers rely on the
            two-argument signature.
        f: fontforge font object, modified in place.
    """
    from sortsmill import cap_spacing

    figures = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']

    def base(letter):
        # Accents over i and j go on the dotless base glyphs.
        if letter == 'i':
            base = 'dotlessi'
        elif letter == 'j':
            base = 'uni0237'
        else:
            base = letter
        return base

    db = font_db.db_create(f)
    # Heights (font units) at which spacing anchors are placed.
    db['spacing_anchor_heights'] = { 'hi' : 684,   # caps and ascenders
                                     'x' : 376,    # ex-height
                                     'o' : 195,    # like the letter o
                                     'bl' : 15,    # baseline
                                     'lo' : -244 } # descenders

    all_glyphs = set(f) - set(['.notdef'])
    # separate_strings partitions by the predicates in order; the extra
    # 8th target receives everything no predicate matched.
    # NOTE(review): last_name is not defined locally — presumably
    # star-imported from sortsmill.glyphbuild; confirm.
    (smallcaps, capssmall, uppercase, lowercase, fraction_bar, numerators, denominators, remaining) = \
        tuple(separate_strings(all_glyphs, [
            (lambda s: s[-3:] == '.sc'),
            (lambda s: s[-3:] == '.c2'),
            (lambda s: is_uppercase(s, last_name)),
            (lambda s: is_lowercase(s, last_name)),
            (lambda s: s == 'fraction'),
            (lambda s: s[-6:] == '.numer'),
            (lambda s: s[-6:] == '.denom'),
        ]))
    # Which glyph classes get kerned against which.
    db["kerning_sets"] = [
        (remaining, uppercase | lowercase | smallcaps | capssmall | remaining),
        (uppercase, uppercase | lowercase | smallcaps | remaining),
        (smallcaps, uppercase | smallcaps | capssmall | remaining),
        (lowercase, uppercase | lowercase | remaining),
        (numerators, fraction_bar),
        (fraction_bar, denominators),
    ]
    # Round kerning values to the nearest multiple of 5 units.
    db['kerning_rounding'] = '(lambda x: int(round(x/5.0)) * 5)'
    # db['kerning_rounding'] = '(lambda x: x if abs(x) < 10 else int(round(x/5.0))*5)'

    build_several_space_glyphs(f, emsize = 1000, spacesize = 185,
                               thinspacesize = 1000 / 6,
                               hairspacesize = 1000 / 10,
                               tabwidth = f['zero.lining'].width)
    propagate_hyphens(f)
    # propagate_hyphens(f, '.uppercase')
    build_spacing_marks(f, width = 2 * 195)

    # Simple aliases built as references to existing glyphs.
    make_glyph_reference('quotesingle', f['minute'])
    make_glyph_reference('quotedbl', f['second'])
    make_glyph_reference('asciitilde', f['uni2053']) # Swung dash.
    # make_glyph_reference('i.TRK', f['i']) <-- Handled below.
    make_glyph_reference('Dcroat', f['Eth'])
    make_glyph_reference('dcroat.sc', f['eth.sc'])
    make_glyph_reference('L.CAT', f['L'])
    make_glyph_reference('l.CAT', f['l'])
    make_glyph_reference('L.CAT.c2', f['L.c2'])
    make_glyph_reference('l.CAT.sc', f['l.sc'])
    build_multigraph('ellipsis', [f['period'], f['period'], f['period']])

    # Default figures and the dollar sign point at the hanging variants.
    for fig in figures + ['dollar']:
        make_glyph_reference(fig, f[fig + '.hanging'])
    make_glyph_reference('uni00B9', f['one.sup'])
    make_glyph_reference('uni00B2', f['two.sup'])
    make_glyph_reference('uni00B3', f['three.sup'])
    # Numerator/subscript/superscript figures are shifted references to the
    # denominator figures; only .numer copies its spacing anchors.
    for extension in [('.numer', 244), ('.sub', -180), ('.sup', 244)]:
        for fig in figures:
            make_glyph_reference(fig + extension[0],
                                 f[fig + '.denom'],
                                 transformation = (1, 0, 0, 1, 0, extension[1]),
                                 copy_spacing_anchors = (extension[0] == '.numer'))
    build_multigraph('onequarter', [f['one.numer'], f['fraction'], f['four.denom']])
    build_multigraph('onehalf', [f['one.numer'], f['fraction'], f['two.denom']])
    build_multigraph('threequarters', [f['three.numer'], f['fraction'], f['four.denom']])

    # Every small-cap glyph gets a .c2 (caps-to-small-caps) reference; a few
    # need non-default target names.
    for g in f:
        if g[-3:] == '.sc' and g not in ['i.TRK.sc', 'l.CAT.sc']:
            if g == 'periodcentered.sc':
                make_glyph_reference(g[:-3] + '.c2', f[g])
            elif g == 'uni0163.sc':
                make_glyph_reference('uni0162.c2', f[g])
            elif g == 'uni0219.sc':
                make_glyph_reference('uni0218.c2', f[g])
            elif g == 'uni021B.sc':
                make_glyph_reference('uni021A.c2', f[g])
            elif g in ('ae.sc', 'oe.sc', 'ij.sc'):
                make_glyph_reference(g[:-3].upper() + '.c2', f[g])
            else:
                make_glyph_reference(g[:-3].capitalize() + '.c2', f[g])
    #--------------------------------------------------------------------------
    # Comma accent below.
    for letter in 'GKkLlNnRr':
        build_accented_glyph(letter + 'commaaccent', f[base(letter)], f['uni0326'])
    build_accented_glyph('gcommaaccent', f['g'], f['uni0312'])
    build_accented_glyph('uni0218', f['S'], f['uni0326'])
    build_accented_glyph('uni0219', f['s'], f['uni0326'])
    build_accented_glyph('uni021A', f['T'], f['uni0326'])
    build_accented_glyph('uni021B', f['t'], f['uni0326'])
    for letter in 'gklnr':
        build_accented_glyph(letter + 'commaaccent.sc', f[letter + '.sc'], f['uni0326'])
    build_accented_glyph('uni0219.sc', f['s.sc'], f['uni0326'])
    build_accented_glyph('uni021B.sc', f['t.sc'], f['uni0326'])
    #--------------------------------------------------------------------------
    # Cedilla; remove_overlap merges the attached cedilla with the base.
    for letter in 'CcSs':
        build_accented_glyph(letter + 'cedilla', f[base(letter)], f['uni0327'])
        remove_overlap(f[letter + 'cedilla'])
    build_accented_glyph('uni0162', f['T'], f['uni0327'])
    remove_overlap(f['uni0162'])
    build_accented_glyph('uni0163', f['t'], f['uni0327'])
    remove_overlap(f['uni0163'])
    for letter in 'cs':
        build_accented_glyph(letter + 'cedilla.sc', f[letter + '.sc'], f['uni0327'])
        remove_overlap(f[letter + 'cedilla.sc'])
    build_accented_glyph('uni0163.sc', f['t.sc'], f['uni0327'])
    remove_overlap(f['uni0163.sc'])
    #--------------------------------------------------------------------------
    # Grave.
    for letter in 'aeiou':
        build_accented_glyph(letter + 'grave', f[base(letter)], f['gravecomb'])
    for letter in 'AEIOU':
        build_accented_glyph(letter + 'grave', f[base(letter)], f['gravecomb.cap'])
    for letter in 'aeiou':
        build_accented_glyph(letter + 'grave.sc', f[letter + '.sc'], f['gravecomb'])
    #--------------------------------------------------------------------------
    # Acute.
    for letter in 'aceinorsuyz':
        build_accented_glyph(letter + 'acute', f[base(letter)], f['acutecomb'])
    for letter in 'ACEILNORSUYZ':
        build_accented_glyph(letter + 'acute', f[base(letter)], f['acutecomb.cap'])
    # build_accented_glyph('lacute', f['l'], f['acutecomb.cap']) <-- We are making this one by hand.
    for letter in 'aceilnorsuyz':
        build_accented_glyph(letter + 'acute.sc', f[letter + '.sc'], f['acutecomb'])
    #--------------------------------------------------------------------------
    # Tilde.
    for letter in 'ainou':
        build_accented_glyph(letter + 'tilde', f[base(letter)], f['tildecomb'])
    for letter in 'AINOU':
        build_accented_glyph(letter + 'tilde', f[base(letter)], f['tildecomb.cap'])
    for letter in 'ainou':
        build_accented_glyph(letter + 'tilde.sc', f[letter + '.sc'], f['tildecomb'])
    #--------------------------------------------------------------------------
    # Dieresis.  Lowercase 'i' is handled separately below with the narrow
    # dieresis form.
    for letter in 'aeouy':
        build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308'])
    for letter in 'AEIOUY':
        build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308.cap'])
    for letter in 'aeiouy':
        build_accented_glyph(letter + 'dieresis.sc', f[letter + '.sc'], f['uni0308'])
    for letter in 'i':
        build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308.narrow'])
    #--------------------------------------------------------------------------
    # Ring.
    for letter in 'au':
        build_accented_glyph(letter + 'ring', f[base(letter)], f['uni030A'])
    for letter in 'AU':
        build_accented_glyph(letter + 'ring', f[base(letter)], f['uni030A.cap'])
    for letter in 'au':
        build_accented_glyph(letter + 'ring.sc', f[letter + '.sc'], f['uni030A'])
    #--------------------------------------------------------------------------
    # Circumflex.  'h' and the f-ligatures take the cap-height accent,
    # presumably because of their ascenders — TODO(review) confirm.
    for letter in 'acegijosuwy':
        build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302'])
    for letter in 'hACEGHIJOSUWY':
        build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302.cap'])
    for letter in ['f_h', 'f_f_h']:
        build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302.cap'])
    for letter in 'aceghijosuwy':
        build_accented_glyph(letter + 'circumflex.sc', f[letter + '.sc'], f['uni0302'])
    #--------------------------------------------------------------------------
    # Breve.
    for letter in 'aegiou':
        build_accented_glyph(letter + 'breve', f[base(letter)], f['uni0306'])
    for letter in 'AEGIOU':
        build_accented_glyph(letter + 'breve', f[base(letter)], f['uni0306.cap'])
    for letter in 'aegiou':
        build_accented_glyph(letter + 'breve.sc', f[letter + '.sc'], f['uni0306'])
    #--------------------------------------------------------------------------
    # Dot accent, including the dotted forms of i/j themselves.
    for letter in 'cegz':
        build_accented_glyph(letter + 'dotaccent', f[base(letter)], f['uni0307'])
    for letter in 'CEGIZ':
        build_accented_glyph(letter + 'dotaccent', f[base(letter)], f['uni0307.cap'])
    for letter in 'cegz':
        build_accented_glyph(letter + 'dotaccent.sc', f[letter + '.sc'], f['uni0307'])
    build_accented_glyph('i.TRK', f['dotlessi'], f['uni0307'])
    build_accented_glyph('i.TRK.sc', f['i.sc'], f['uni0307'])
    build_accented_glyph('j.TRK', f['uni0237'], f['uni0307'])
    build_accented_glyph('i', f['dotlessi'], f['uni0307.high'])
    build_accented_glyph('iogonek', f['iogonek.dotless'], f['uni0307.high'])
    build_accented_glyph('j', f['uni0237'], f['uni0307.high'])
    #--------------------------------------------------------------------------
    # Caron; d, L, l, t take the apostrophe-shaped caron (uni0315).
    for letter in 'cenrsz':
        build_accented_glyph(letter + 'caron', f[base(letter)], f['uni030C'])
    for letter in 'CDENRTSZ':
        build_accented_glyph(letter + 'caron', f[base(letter)], f['uni030C.cap'])
    for letter in 'dLlt':
        build_accented_glyph(letter + 'caron', f[base(letter)], f['uni0315'])
    for letter in 'cdenrstz':
        build_accented_glyph(letter + 'caron.sc', f[letter + '.sc'], f['uni030C'])
    build_accented_glyph('lcaron.sc', f['l.sc'], f['uni0315'])
    #--------------------------------------------------------------------------
    # Macron.
    for letter in 'aeiou':
        build_accented_glyph(letter + 'macron', f[base(letter)], f['uni0304'])
    for letter in 'AEIOU':
        build_accented_glyph(letter + 'macron', f[base(letter)], f['uni0304.cap'])
    for letter in 'aeiou':
        build_accented_glyph(letter + 'macron.sc', f[letter + '.sc'], f['uni0304'])
    #--------------------------------------------------------------------------
    # Hungarumlaut (double acute).
    for letter in 'ou':
        build_accented_glyph(letter + 'hungarumlaut', f[base(letter)], f['uni030B'])
    for letter in 'OU':
        build_accented_glyph(letter + 'hungarumlaut', f[base(letter)], f['uni030B.cap'])
    for letter in 'ou':
        build_accented_glyph(letter + 'hungarumlaut.sc', f[letter + '.sc'], f['uni030B'])
    #--------------------------------------------------------------------------
    # Multigraphs assembled from existing glyphs.
    build_multigraph('napostrophe', [f['quoteright'], f['n']])
    build_multigraph('IJ', [f['I'], f['J']])
    build_multigraph('ij', [f['i'], f['j']])
    build_multigraph('ij.sc', [f['i.sc'], f['j.sc']])
    # build_multigraph('Ldot', [f['L'], f['periodcentered']]) # Done by hand.
    build_multigraph('ldot', [f['l'], f['periodcentered']])
    # build_multigraph('ldot.sc', [f['l.sc'], f['periodcentered.sc']]) # Done by hand.
    #--------------------------------------------------------------------------
    # Space all glyphs from their anchors, then regenerate kerning/features.
    f.selection.all()
    space_selected_by_anchors(f)
    f.selection.none()
    generate_kerning_and_read_features(None, f)
    #--------------------------------------------------------------------------
    font_db.db_close(f)
#--------------------------------------------------------------------------
| |
import sys
from random import random
from operator import add
from datetime import datetime
from pyspark import SparkContext
from pyspark.mllib.classification import LogisticRegressionWithSGD, LogisticRegressionModel
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from numpy import array
import json
import pymongo
from vaderSentiment.vaderSentiment import sentiment as vaderSentiment
# Subtract 2 dates s2 - s1. Dates are in string format. Return difference
# in hours.
def subtract_dates(s2, s1):
    """Return abs(s2 - s1) in hours, plus a 0.01 fudge term.

    The fudge keeps the result strictly positive so callers can divide by
    it (growth rate computations).  Timestamps are ISO-8601-ish strings;
    a trailing 'Z' or '+HH:MM' offset is dropped (offsets are assumed to
    cancel pairwise — TODO(review) confirm; negative offsets are not
    handled, matching the original).  Sub-second precision is ignored.
    """
    def _parse(s):
        # Strip a trailing offset marker, then choose the format based on
        # whether fractional seconds remain.  The original failed with
        # ValueError on '+offset with fraction' and 'Z without fraction'.
        head = s.partition('+')[0].partition('Z')[0]
        fmt = "%Y-%m-%dT%H:%M:%S.%f" if '.' in head else "%Y-%m-%dT%H:%M:%S"
        return datetime.strptime(head, fmt)

    date_diff = abs(_parse(s2) - _parse(s1))
    return date_diff.days * 24.0 + (date_diff.seconds / 3600.0) + 0.01
# Parse an ISO-8601-style duration such as 'PT1M6S' (1 minute, 6 seconds)
# into a total number of seconds.
def parse_duration(s):
    s = s.strip('PT')
    total_seconds = 0.0
    # Consume each unit marker in order; partition advances past the part
    # already converted.
    for marker, scale in (('H', 3600.0), ('M', 60.0), ('S', 1.0)):
        if marker in s:
            value, _, s = s.partition(marker)
            total_seconds += float(value) * scale
    return total_seconds
# Load the data from the json file into an RDD of dicts
def load_data_from_file(sc, file_name):
    """Read newline-delimited JSON from file_name into an RDD.

    Args:
        sc: SparkContext used to read the file.
        file_name: Path/URI understood by SparkContext.textFile.

    Returns:
        RDD of parsed JSON records.
    """
    # Renamed from `input` to avoid shadowing the builtin; the single-arg
    # print() call form works under both Python 2 and 3 (the original used
    # a Python-2-only print statement).
    lines = sc.textFile(file_name)
    data = lines.map(lambda x: json.loads(x))
    print('DATA COUNT: %d' % (data.count()))
    return data
# Ensure the record carries a 'source' field; an existing value is kept
# (setdefault semantics), only missing keys are filled in.
def set_source(dict, source):
    if 'source' not in dict:
        dict['source'] = source
    return dict
# Load up to 1000 records from db[source] with created_at inside
# [start_date, end_date), tagging each with a lowercase 'source' field that
# records where it came from.
def load_data_after_date(sc, db, start_date, end_date, source):
    collection = db[source]
    fmt = '%Y-%m-%dT%H:%M:%S.%f'
    start_iso = datetime.strptime(start_date, fmt).isoformat()
    end_iso = datetime.strptime(end_date, fmt).isoformat()
    query = {'created_at': {'$gte': start_iso, '$lt': end_iso}}
    # Materialize the cursor so the documents can be parallelized.
    documents = [doc for doc in collection.find(query).limit(1000)]
    rdd = sc.parallelize(documents)
    return rdd.map(lambda x: set_source(x, source.lower()))
# Score each item's description text with vaderSentiment and store the
# compound score on the record.
def get_sentiment(item, source):
    ''' Get the overall sentiment of the videos description '''
    if source == 'twitter':
        text = item['tweet']['orig_text']
    elif source == 'facebook':
        text = item['description'] if 'description' in item else ''
    else:
        # Anything else is treated as YouTube data.
        text = item['items'][0]['snippet']['description']
    scores = vaderSentiment(text.encode('utf-8').strip())
    # setdefault: an already-present sentiment value is left untouched.
    item.setdefault("sentiment", scores['compound'])
    return item
# Build an MLLib LabeledPoint from a Twitter record.  The label is the
# retweet count, or a 0/1 popularity class under logistic regression; the
# features are video length, retweets, favorites, growth rate, sentiment
# and a source indicator (1 = Twitter).
def create_labeled_points_twitter(dict, reg_type):
    tweet = dict['tweet']
    retweets = float(tweet['orig_retweet_count'])
    if reg_type == 'logistic':
        # Dataset-specific threshold: >= 158 retweets counts as popular.
        popularity = 1.0 if retweets >= 158.0 else 0.0
    else:
        popularity = retweets
    video_length_sec = float(tweet['orig_video_length_ms']) / 1000.0
    favorite_count = float(tweet['orig_favorite_count'])
    history = tweet['rt_history']
    # Growth window runs from creation to the last retweet seen, falling
    # back to the record's last_modified when no history exists.
    if history:
        end_time = history[-1]['rt_created_at']
    else:
        end_time = dict['last_modified']
    time_hrs = subtract_dates(end_time, tweet['orig_created_at'])
    growth_rate = retweets / time_hrs
    features = [video_length_sec, retweets, favorite_count, growth_rate,
                dict['sentiment'], 1]
    return LabeledPoint(popularity, features)
# Build an MLLib LabeledPoint from a Facebook record.  The label is the
# like count, or a 0/1 popularity class under logistic regression; the
# source indicator feature is 2 (= Facebook).
def create_labeled_points_facebook(dict, reg_type):
    total_likes = float(dict['total_likes'])
    if reg_type == 'logistic':
        # Dataset-specific threshold: >= 496 likes counts as popular.
        popularity = 1.0 if total_likes >= 496.0 else 0.0
    else:
        popularity = total_likes
    video_length_sec = float(dict['length'])
    total_comments = float(dict['total_comments'])
    # Growth window: creation time to last recorded history entry.
    time_hrs = subtract_dates(dict['history'][-1]['timestamp'],
                              dict['created_time'])
    growth_rate = total_likes / time_hrs
    features = [video_length_sec, total_likes, total_comments, growth_rate,
                dict['sentiment'], 2]
    return LabeledPoint(popularity, features)
# Keep only YouTube records that carry the fields needed for feature
# extraction: a non-empty 'items' list whose first entry has
# contentDetails.duration.
def filter_youtube_data(dict):
    if dict['items'] == []:
        return False
    item = dict['items'][0]
    if 'contentDetails' not in item:
        return False
    return 'duration' in item['contentDetails']
# Build an MLLib LabeledPoint from a YouTube record.  The label is the
# view count, or a 0/1 popularity class under logistic regression; the
# source indicator feature is 3 (= YouTube).
def create_labeled_points_youtube(dict, reg_type):
    item = dict['items'][0]
    view_count = float(item['statistics']['viewCount'])
    if reg_type == 'logistic':
        # Dataset-specific threshold: >= 50790 views counts as popular.
        popularity = 1.0 if view_count >= 50790.0 else 0.0
    else:
        popularity = view_count
    video_length_sec = parse_duration(item['contentDetails']['duration'])
    favorite_count = float(item['statistics']['favoriteCount'])
    # Growth window: published time to the last stats snapshot.
    time_hrs = subtract_dates(item['stats_history'][-1]['timestamp'],
                              item['snippet']['publishedAt'])
    growth_rate = view_count / time_hrs
    features = [video_length_sec, view_count, favorite_count, growth_rate,
                dict['sentiment'], 3]
    return LabeledPoint(popularity, features)
# Attach a 'prediction_logistic_reg' field computed from the trained model.
# An already-present prediction is kept (setdefault semantics).
def predict_from_model(dict, model):
    builders = {
        'twitter': create_labeled_points_twitter,
        'facebook': create_labeled_points_facebook,
    }
    # Any source other than twitter/facebook is treated as YouTube.
    build = builders.get(dict['source'], create_labeled_points_youtube)
    point = build(dict, 'logistic')
    dict.setdefault('prediction_logistic_reg', model.predict(point.features))
    return dict
# Build (and optionally persist) a logistic regression model over the
# combined Twitter/YouTube/Facebook data, then report train/test error.
def spark_create_model(data_size, file_path, store=False):
    """
    Spark Model Creation

    Args:
        data_size: 'small' to use the small sample dumps, anything else
            for the full dumps.
        file_path: Where to save the trained model when store is true.
        store: Save the model to file_path when True.
    """
    # Set this variable to distinguish between logistic and linear regression
    REGRESSION_TYPE = 'logistic'
    sc = SparkContext(appName="SparkCreateModel")

    def _load(name):
        # Pick the small sample or the full dump of one source (the
        # original repeated this if/else three times).
        prefix = 'small_' if data_size == 'small' else ''
        return load_data_from_file(sc, "file:///root/mongoData/%s%s.json" % (prefix, name))

    twitter_data = _load('twitter')
    youtube_data = _load('youtube').filter(filter_youtube_data)
    facebook_data = _load('facebook')
    # Store the sentiment score for each data item
    sent_twitter_data = twitter_data.map(lambda x: get_sentiment(x, 'twitter'))
    sent_youtube_data = youtube_data.map(lambda x: get_sentiment(x, 'youtube'))
    sent_facebook_data = facebook_data.map(lambda x: get_sentiment(x, 'facebook'))
    # Create MLLib LabeledPoints.
    twitter_LP = sent_twitter_data.map(lambda x: create_labeled_points_twitter(x, REGRESSION_TYPE))
    youtube_LP = sent_youtube_data.map(lambda x: create_labeled_points_youtube(x, REGRESSION_TYPE))
    facebook_LP = sent_facebook_data.map(lambda x: create_labeled_points_facebook(x, REGRESSION_TYPE))
    # Split data into training (80%) and test (20%) sets.
    train_twitter, test_twitter = twitter_LP.randomSplit([0.8, 0.2], seed=0)
    train_youtube, test_youtube = youtube_LP.randomSplit([0.8, 0.2], seed=0)
    train_facebook, test_facebook = facebook_LP.randomSplit([0.8, 0.2], seed=0)
    # Combine all 3 datasets with the RDD.union command.
    train_LP = train_twitter.union(train_facebook).union(train_youtube)
    test_LP = test_twitter.union(test_facebook).union(test_youtube)
    # Build logistic regression model
    model_log = LogisticRegressionWithSGD.train(train_LP)
    if store:
        model_log.save(sc, file_path)
    # Evaluate the model on training data.  NOTE: the original used the
    # Python-2-only `lambda (v, p):` tuple-parameter syntax; indexing the
    # pair keeps this portable to Python 3.
    preds_train_log = train_LP.map(lambda p: (p.label, model_log.predict(p.features)))
    total_train = float(train_LP.count())
    trainErr_log = preds_train_log.filter(lambda vp: vp[0] != vp[1]).count() / total_train
    # Evaluate the model on test data
    preds_test_log = test_LP.map(lambda p: (p.label, model_log.predict(p.features)))
    total_test = float(test_LP.count())
    testErr_log = preds_test_log.filter(lambda vp: vp[0] != vp[1]).count() / total_test
    print('TWITTER LP COUNT %d' % (twitter_LP.count()))
    print('YOUTUBE LP COUNT %d' % (youtube_LP.count()))
    print('FACEBOOK LP COUNT %d' % (facebook_LP.count()))
    print("Train Error = " + str(trainErr_log))
    print("Test Error = " + str(testErr_log))
    print(model_log)
    sc.stop()
# Pull data from a MongoDB between start_date and end_date and write back a
# logistic-regression prediction for each record, using the model stored
# at file_path.  Uses the MongoDB with host, port and db_name.
def spark_predict(file_path, start_date, end_date, db_name='test', host='67.228.179.2', port='27017'):
    sc = SparkContext(appName="SparkPredict")
    db = pymongo.MongoClient(host, int(port))[db_name]
    # Load data and add a source field indicating which source it came from
    twitter_data = load_data_after_date(sc, db, start_date, end_date, 'twitter')
    youtube_data = load_data_after_date(sc, db, start_date, end_date, 'Youtube')
    facebook_data = load_data_after_date(sc, db, start_date, end_date, 'facebook')
    youtube_data = youtube_data.filter(filter_youtube_data)
    # Store the sentiment score for each data item
    sent_twitter_data = twitter_data.map(lambda x: get_sentiment(x, 'twitter'))
    sent_youtube_data = youtube_data.map(lambda x: get_sentiment(x, 'youtube'))
    sent_facebook_data = facebook_data.map(lambda x: get_sentiment(x, 'facebook'))
    # Combine all of the data into a single RDD.
    all_data = sent_twitter_data.union(sent_youtube_data).union(sent_facebook_data)
    # Load the model stored at file_path
    model = LogisticRegressionModel.load(sc, file_path)
    # Make predictions on all of the data
    all_preds = all_data.map(lambda x: predict_from_model(x, model))
    # Write predictions back to the originating collection.
    for record in all_preds.collect():
        update = {'$set': {'prediction_logistic_reg': record['prediction_logistic_reg']}}
        if record['source'] == 'twitter':
            db.twitter.update_one({'ID': record['ID']}, update)
        if record['source'] == 'facebook':
            db.facebook.update_one({'id': record['id']}, update)
        if record['source'] == 'youtube':
            db.Youtube.update_one({'ID': record['ID']}, update)
    print('ALL LP COUNT %d' % (all_data.count()))
    print('TWITTER LP COUNT %d' % (sent_twitter_data.count()))
    print('YOUTUBE LP COUNT %d' % (sent_youtube_data.count()))
    print('FACEBOOK LP COUNT %d' % (sent_facebook_data.count()))
    print(model)
    sc.stop()
if __name__ == "__main__":
    # Create a logistic regression model in Spark. The 1st parameter
    # specifies which dataset to use. The 2nd parameter specifies the
    # file_path in which to store the model. The 3rd optional parameter
    # is either True or False: True if you want to save the model that is
    # created and False if you do not want to save it.
    #spark_create_model('small', 'small_data_log_model', False)
    spark_create_model('large', 'large_data_log_model', True)
    # Run predictions using the model specified with the 1st parameter. Collect
    # data from the database after the date specified by the 2nd parameter. The
    # last 3 parameters specify the name of the database, the host IP of the
    # database and the port of the database.
    start_date = '2016-04-07T00:00:00.000000'
    end_date = '2016-04-08T00:00:00.000000'
    #spark_predict('small_data_log_model', start_date, end_date, 'VideosDB', '67.228.179.2', '27017')
    #spark_predict('large_data_log_model', start_date, end_date, 'VideosDB', '67.228.179.2', '27017')
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, SINA Corporation.
#
"""Extracts OpenStack config option info from module(s)."""
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
from ceilometer.openstack.common import importutils
# oslo.config option class names, matched against str(type(opt)) below.
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"

# Human-readable description emitted next to each option type.
OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    MULTISTROPT: 'multi valued',
}

# Running total of options printed; incremented by print_group_opts and
# reported at the end of main().
OPTION_COUNT = 0

# Matches any of the option class names inside str(type(opt)).
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
# Repository root (two levels above this file); stripped from option
# defaults by _sanitize_default.
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
WORDWRAP_WIDTH = 60
def main(srcfiles):
    # Emit a sample ceilometer.conf: a banner, then every discovered option
    # grouped by config section, DEFAULT first.
    # NOTE(review): Python 2 only (print statements, list-returning filter).
    print '\n'.join(['#' * 26, '# ceilometer.conf sample #', '#' * 26, ''])
    mods_by_pkg = dict()
    for filepath in srcfiles:
        # Top-level package is the first path component below the repo root.
        pkg_name = filepath.split(os.sep)[1]
        # Convert the file path into a dotted module path (sans extension).
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
    pkg_names.sort()
    ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
    ext_names.sort()
    pkg_names.extend(ext_names)
    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}
    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            # Import a package's __init__ as the package itself.
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
            mod_obj = _import_module(mod_str)
            if not mod_obj:
                continue
            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))
    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group, opts in opts_by_group.items():
        print_group_opts(group, opts)
    print "# Total option count: %d" % OPTION_COUNT
def _import_module(mod_str):
    """Import mod_str and return the module object, or None on failure.

    Modules under bin/ are scripts rather than importable packages, so they
    are loaded from source.  ImportErrors are reported to stderr; other
    failures are silently skipped so option discovery can continue.
    """
    try:
        if mod_str.startswith('bin.'):
            imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
            return sys.modules[mod_str[4:]]
        else:
            return importutils.import_module(mod_str)
    except (ValueError, AttributeError):
        # Unused exception binding dropped (was the Python-2-only
        # `except E, err` form, which is a syntax error on Python 3).
        return None
    except ImportError as ie:
        # `as` form is valid on Python 2.6+ and Python 3 (was `, ie`).
        sys.stderr.write("%s\n" % str(ie))
        return None
    except Exception:
        # Deliberate best-effort: any other failure just skips the module.
        return None
def _guess_groups(opt, mod_obj):
    """Pick the config group an option belongs to.

    Collects every group in cfg.CONF that contains opt; if more than one
    matches, prefers a group whose name appears in the module name, then
    DEFAULT, and otherwise aborts with an error.
    """
    groups = []
    # is it in the DEFAULT group?
    if (opt.dest in cfg.CONF and
            not isinstance(cfg.CONF[opt.dest], cfg.CONF.GroupAttr)):
        groups.append('DEFAULT')
    # what other groups is it in?
    for key, value in cfg.CONF.items():
        if isinstance(value, cfg.CONF.GroupAttr) and opt.dest in value:
            groups.append(key)
    if len(groups) == 1:
        return groups[0]
    # Ambiguous: prefer a group name that occurs in the module's name.
    group = next((g for g in groups if g in mod_obj.__name__), None)
    if group is None and 'DEFAULT' in groups:
        sys.stderr.write("Guessing that " + opt.dest +
                         " in " + mod_obj.__name__ +
                         " is in DEFAULT group out of " +
                         ','.join(groups) + "\n")
        return 'DEFAULT'
    if group is None:
        sys.stderr.write("Unable to guess what group " + opt.dest +
                         " in " + mod_obj.__name__ +
                         " is in out of " + ','.join(groups) + "\n")
        sys.exit(1)
    sys.stderr.write("Guessing that " + opt.dest +
                     " in " + mod_obj.__name__ +
                     " is in the " + group +
                     " group out of " + ','.join(groups) + "\n")
    return group
def _list_opts(obj):
    """Return (group, [options]) pairs for every cfg.Opt found on obj.

    Scans the object's attributes for individual Opt instances and for
    lists made up entirely of Opt instances (SubCommandOpt is excluded),
    then buckets them by their guessed config group.
    """
    def is_opt(o):
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    collected = []
    for attr_name in dir(obj):
        value = getattr(obj, attr_name)
        if is_opt(value):
            collected.append(value)
        elif isinstance(value, list) and all(is_opt(item) for item in value):
            collected.extend(value)
    grouped = {}
    for opt in collected:
        grouped.setdefault(_guess_groups(opt, obj), []).append(opt)
    return grouped.items()
def print_group_opts(group, opts_by_module):
    # Print one "[group]" section of the sample config file, listing the
    # options contributed by each module, and bump the global option counter
    # reported at the end of the run.
    print "[%s]" % group
    print
    global OPTION_COUNT
    for mod, opts in opts_by_module:
        OPTION_COUNT += len(opts)
        print '######## defined in %s ########' % mod
        print
        for opt in opts:
            _print_opt(opt)
        print
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(s):
    """Replace machine-specific default values with portable placeholders.

    Source-tree paths, the local IP and the local hostname would otherwise
    leak into the generated sample configuration file.
    """
    if s.startswith(BASEDIR):
        return s.replace(BASEDIR, '/usr/lib/python/site-packages')
    if BASEDIR in s:
        return s.replace(BASEDIR, '')
    if s == _get_my_ip():
        return '10.0.0.1'
    if s == socket.getfqdn():
        return 'nova'
    if s.strip() != s:
        # Quote values with significant leading/trailing whitespace.
        return '"%s"' % s
    return s
def _print_opt(opt):
    # Emit a single option as a commented-out "#name=default" sample entry,
    # preceded by its word-wrapped help text and a "(type)" suffix.
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
    opt_type = None
    try:
        # Derive the option type (StrOpt, BoolOpt, ...) from the class name.
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError), err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help += ' (' + OPT_TYPES[opt_type] + ')'
    print '#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH))
    try:
        if opt_default is None:
            print '#%s=<None>' % opt_name
        elif opt_type == STROPT:
            assert(isinstance(opt_default, basestring))
            print '#%s=%s' % (opt_name, _sanitize_default(opt_default))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            # Booleans are rendered lowercase (true/false).
            print '#%s=%s' % (opt_name, str(opt_default).lower())
        elif opt_type == INTOPT:
            # bool is a subclass of int, so exclude it explicitly.
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print '#%s=%s' % (opt_name, opt_default)
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print '#%s=%s' % (opt_name, ','.join(opt_default))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            # MultiStrOpt defaults are printed one per line.
            for default in opt_default:
                print '#%s=%s' % (opt_name, default)
        print
    except Exception:
        # An assert above failed: the default's type disagrees with the
        # declared option type.
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
if __name__ == '__main__':
    # Entry point: expects one or more module/source names on the command
    # line and hands them to main() for option extraction.
    if len(sys.argv) < 2:
        print "usage: python %s [srcfile]...\n" % sys.argv[0]
        sys.exit(0)
    main(sys.argv[1:])
| |
import random
from datetime import timedelta
from crispy_forms.helper import FormHelper
from crispy_forms.layout import ButtonHolder, Div, Layout, Submit, HTML
from phonenumber_field.formfields import PhoneNumberField
from model_utils import Choices
from django import forms
from django.conf import settings
from django.conf.urls import url as re_path
from django.contrib import messages
from django.contrib.auth.hashers import is_password_usable
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils import timezone
from django.urls import reverse_lazy
from django.db import transaction
from django.db.models import Q, Case, When, Value, BooleanField
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from assopy.models import Invoice, Order
from p3.models import P3Profile, TicketConference
from p3.utils import assign_ticket_to_user
from .accounts import get_or_create_attendee_profile_for_new_user
from .api import generate_matrix_password
from .cfp import AddSpeakerToTalkForm
from .models import (
AttendeeProfile,
TALK_STATUS,
Conference,
Speaker,
TalkSpeaker,
Ticket,
StreamSet,
ATTENDEEPROFILE_VISIBILITY,
ATTENDEEPROFILE_GENDER,
)
from .tickets import reset_ticket_settings
from .decorators import full_profile_required
def fare_valid_for_matrix_login(user, ticket):
    """
    Return whether or not `ticket` allows `user` to access our Matrix chat
    server.

    Two conditions must both hold:
    1. the ticket is *assigned* to `user`, and
    2. the third letter of the fare code is C, S or P
       (i.e. one of TRCC, TRCP, TRSC, TRSP, TRPC, TRPP).
    """
    if ticket.user != user:
        return False
    return ticket.fare.code[2] in 'CSP'
@login_required
@full_profile_required
def user_dashboard(request):
    """Render the user panel home: proposals, orders, invoices, tickets,
    and Matrix chat credentials for entitled ticket holders."""
    user = request.user
    tickets = get_tickets_for_current_conference(user)

    matrix_username = None
    matrix_password = None
    if any(fare_valid_for_matrix_login(user, ticket) for ticket in tickets):
        matrix_username = user.email
        if is_password_usable(user.password):
            matrix_password = user.password
        else:
            matrix_password = generate_matrix_password(user)

    context = {
        # Because in the template TALK_STATUS.accepted will be expanded to
        # the verbose name, and therefore comparison in the template will
        # fail. Putting that in a separate variable.
        "ACCEPTED_PROPOSAL": TALK_STATUS.accepted,
        "proposals": get_proposals_for_current_conference(user),
        "orders": get_orders_for_current_conference(user),
        "invoices": get_invoices_for_current_conference(user),
        "tickets": tickets,
        "matrix_username": matrix_username,
        "matrix_password": matrix_password,
    }
    return TemplateResponse(
        request, "conference/user_panel/dashboard.html", context
    )
@login_required
def manage_ticket(request, ticket_id):
    """Let the ticket's current holder edit its per-conference settings."""
    ticket = get_object_or_404(Ticket, pk=ticket_id)
    allowed = (
        ticket.fare.is_conference
        and ticket in get_tickets_for_current_conference(user=request.user)
    )
    if not allowed:
        return HttpResponse("Can't do", status=403)

    ticket_configuration, _ = TicketConference.objects.get_or_create(
        ticket=ticket,
    )

    if request.method == "POST":
        ticket_configuration_form = TicketConferenceConfigForm(
            request.POST, instance=ticket_configuration
        )
        if ticket_configuration_form.is_valid():
            with transaction.atomic():
                ticket_configuration_form.save()
            messages.success(request, "Ticket configured!")
            return redirect("user_panel:dashboard")
    else:
        ticket_configuration_form = TicketConferenceConfigForm(
            instance=ticket_configuration,
            # Note: The name cannot be edited by the user.
            initial={"name": ticket.name},
        )

    return TemplateResponse(
        request,
        "conference/user_panel/configure_ticket.html",
        {"ticket_configuration_form": ticket_configuration_form, "ticket": ticket},
    )
@login_required
def assign_ticket(request, ticket_id):
    """Let the buyer of a ticket hand it over to another registered user."""
    ticket = get_object_or_404(Ticket, pk=ticket_id)
    is_buyer = ticket.buyer == request.user
    if not is_buyer or ticket not in get_tickets_for_current_conference(
        user=request.user
    ):
        return HttpResponse("Can't do", status=403)

    if request.method == "POST":
        assignment_form = AssignTicketForm(request.POST)
        if assignment_form.is_valid():
            recipient = assignment_form.get_user()
            with transaction.atomic():
                assign_ticket_to_user(ticket, recipient)
                # Wipe any configuration made by the previous holder.
                reset_ticket_settings(ticket)
            messages.success(
                request, "Ticket successfully reassigned to %s" % recipient.email
            )
            return redirect("user_panel:dashboard")
    else:
        assignment_form = AssignTicketForm(initial={'email': ticket.assigned_email})

    return TemplateResponse(
        request,
        "conference/user_panel/assign_ticket.html",
        {"ticket": ticket, "assignment_form": assignment_form},
    )
@login_required
def privacy_settings(request):
    """Show and update the user's contact/spam opt-in preferences."""
    attendee_profile = get_or_create_attendee_profile_for_new_user(user=request.user)
    p3_profile = attendee_profile.p3_profile

    if request.method == "POST":
        privacy_form = ProfileSpamControlForm(instance=p3_profile, data=request.POST)
        if privacy_form.is_valid():
            privacy_form.save()
    else:
        privacy_form = ProfileSpamControlForm(instance=p3_profile)

    return TemplateResponse(
        request,
        "conference/user_panel/privacy_settings.html",
        {"privacy_form": privacy_form},
    )
@login_required
def profile_settings(request):
    """Show and update the attendee profile."""
    attendee_profile = get_or_create_attendee_profile_for_new_user(user=request.user)

    if request.method == "POST":
        profile_form = ProfileSettingsForm(
            instance=attendee_profile, data=request.POST, files=request.FILES
        )
        if profile_form.is_valid():
            profile_form.save()
            # Read the saved data back to make sure things get saved correctly
            profile_form = ProfileSettingsForm(instance=attendee_profile)
    else:
        profile_form = ProfileSettingsForm(instance=attendee_profile)

    return TemplateResponse(
        request,
        "conference/user_panel/profile_settings.html",
        {"profile_form": profile_form},
    )
class AssignTicketForm(forms.Form):
    """Asks for the email address of the user a ticket should be assigned to."""

    email = forms.EmailField()

    def get_user(self):
        """Return the active User matching the submitted email, case-insensitively."""
        return User.objects.get(
            is_active=True,
            email__iexact=self.cleaned_data["email"],
        )

    def clean_email(self):
        # The address must belong to an existing, active account.
        try:
            self.get_user()
        except User.DoesNotExist:
            raise forms.ValidationError(
                "Sorry, user does not exist in our system. "
                "Please ask them to create an account first"
            )
        return self.cleaned_data["email"]
class CommaStringMultipleChoiceField(forms.MultipleChoiceField):
    """MultipleChoiceField whose value is stored as one comma-separated string.

    ``to_python`` splits the stored string into a list of trimmed items;
    ``clean`` joins the submitted items back into a single comma-separated
    string for storage.
    """

    def to_python(self, value):
        # str.strip() replaces the original rstrip()+lstrip() chain.
        return [item.strip() for item in value.split(",")]

    def clean(self, value):
        # NOTE(review): this does not call super().clean(), so the usual
        # choice/required validation never runs -- presumably intentional
        # for free-form storage; confirm before changing.
        return ",".join(item.strip() for item in value)
class TicketConferenceConfigForm(forms.ModelForm):
    """Per-ticket conference settings: attendance days and tagline."""

    days = CommaStringMultipleChoiceField(
        label="Days of attendance",
        widget=forms.CheckboxSelectMultiple(),
        required=False,
    )
    # Note: The name is just displayed for reference. Only support can
    # change it via the Django admin
    name = forms.CharField(
        help_text="Please contact support to update how your name is displayed on your ticket.",
        disabled=True,
        required=False,
    )

    class Meta:
        model = TicketConference
        fields = ["name",
                  # XXX Disabled for EP2020; see #1269
                  #"diet",
                  #"shirt_size",
                  "tagline",
                  "days"]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The selectable days depend on the current conference's date range.
        self.fields["days"].choices = self.conference_days()

    def conference_days(self):
        """Return one (iso-date, human label) choice per conference day."""
        conference = Conference.objects.current()
        first_day = conference.conference_start
        total_days = (conference.conference_end - first_day).days + 1
        return [
            (str(first_day + timedelta(days=offset)),
             (first_day + timedelta(days=offset)).strftime("%A %d %B %Y"))
            for offset in range(total_days)
        ]
class ProfileSpamControlForm(forms.ModelForm):
    """Opt-in switches for the kinds of contact an attendee accepts."""
    spam_recruiting = forms.BooleanField(
        label="I want to receive a few selected sponsor messages and job offers through EuroPython.",
        required=False,
    )
    spam_user_message = forms.BooleanField(
        label="I want to receive private messages from other participants.",
        required=False,
    )
    spam_sms = forms.BooleanField(
        label="I want to receive SMS during the conference for main communications.",
        required=False,
    )

    class Meta:
        model = P3Profile
        fields = ("spam_recruiting", "spam_user_message", "spam_sms")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Each checkbox is rendered through its own crispy template so the
        # page can wrap explanatory copy around it.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Div(
                "spam_recruiting",
                template="conference/user_panel/forms/privacy_settings_recruiting.html",
            ),
            Div(
                "spam_user_message",
                template="conference/user_panel/forms/privacy_settings_user_messages.html",
            ),
            Div(
                "spam_sms",
                template="conference/user_panel/forms/privacy_settings_sms_messages.html",
            ),
            ButtonHolder(Submit("update", "Update", css_class="btn btn-primary")),
        )
# Where an attendee's profile picture comes from; drives the radio widget
# in ProfileSettingsForm and the save-time resolution of the image fields.
PICTURE_CHOICES = Choices(
    ("none", "Do not show any picture"),
    ("gravatar", "Use my Gravatar"),
    ("url", "Use this url"),
    ("file", "Use this picture"),
)
class ProfileSettingsForm(forms.ModelForm):
    """Edit an AttendeeProfile together with its related User and P3Profile.

    The form spans three models: first/last name and email live on User,
    tagline/twitter/picture flags live on P3Profile, the rest on
    AttendeeProfile; save() writes all three, so commit=False is not
    supported.
    """
    # TODO move this form and AddSpeakerToTalkForm forms to a separate file
    # and define a common ancestor as they share some of the fields
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)
    email = forms.EmailField()
    phone = PhoneNumberField(
        help_text=(
            "We require a mobile phone number for all speakers "
            "for last minute contacts and in case we need "
            "timely clarification (if no reponse to previous emails). "
            "Use the international format (e.g.: +44 123456789). "
            "This field will <strong>never</strong> be published."
        ),
        max_length=30,
        required=False,
    )
    gender = forms.ChoiceField(
        help_text=(
            "We use this information for statistics related to conference "
            "attendance diversity. "
            "This field will <strong>never</strong> be published."
        ),
        # NOTE(review): ("", "", "") is a 3-tuple, but Django choices are
        # (value, label) 2-tuples -- this looks like it should be ("", "")
        # for the empty choice; confirm before touching.
        choices=(("", "", ""),) + ATTENDEEPROFILE_GENDER,
        widget=forms.Select,
        required=True,
    )
    # Fields shared with the speaker form are reused from it directly.
    is_minor = AddSpeakerToTalkForm.base_fields["is_minor"]
    job_title = AddSpeakerToTalkForm.base_fields["job_title"]
    company = AddSpeakerToTalkForm.base_fields["company"]
    company_homepage = AddSpeakerToTalkForm.base_fields["company_homepage"]
    bio = forms.CharField(
        label="Compact biography",
        help_text="Short biography (one or two paragraphs). Do not paste your CV",
        widget=forms.Textarea,
        required=False,
    )
    tagline = forms.CharField(
        label="Tagline", help_text="Describe yourself in one line.", required=False
    )
    twitter = forms.CharField(max_length=80, required=False)
    visibility = forms.ChoiceField(
        label="",
        choices=ATTENDEEPROFILE_VISIBILITY,
        widget=forms.RadioSelect,
        required=False,
    )
    # The following fields are rendered manually, not using crispy forms, in
    # order to have more control over their layout.
    picture_options = forms.ChoiceField(
        label="", choices=PICTURE_CHOICES, required=False, widget=forms.RadioSelect
    )
    image_url = forms.URLField(required=False)
    image = forms.FileField(required=False, widget=forms.FileInput)

    class Meta:
        model = AttendeeProfile
        fields = (
            # first section
            "first_name",
            "last_name",
            "is_minor",
            "phone",
            "gender",
            "email",
            # second section
            "picture_options",
            "image_url",
            "image",
            # third section
            "tagline",
            "twitter",
            "personal_homepage",
            "location",
            "job_title",
            "company",
            "company_homepage",
            "bio",
            # fourth section
            "visibility",
        )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Set the initial values for fields that are not part of AttendeeProfile
        user = self.instance.user
        self.fields["first_name"].initial = user.first_name
        self.fields["last_name"].initial = user.last_name
        self.fields["email"].initial = user.email
        p3_profile = self.instance.p3_profile
        self.fields["tagline"].initial = p3_profile.tagline
        self.fields["twitter"].initial = p3_profile.twitter
        self.fields["bio"].initial = getattr(self.instance.getBio(), "body", "")
        # Determine the value of the image fields
        image_url = self.instance.p3_profile.profile_image_url()
        # Pick the radio option that reflects the currently stored state:
        # uploaded file > explicit URL > gravatar > none.
        if self.instance.image:
            selected_image_option = PICTURE_CHOICES.file
        elif p3_profile.image_url:
            selected_image_option = PICTURE_CHOICES.url
        elif p3_profile.image_gravatar:
            selected_image_option = PICTURE_CHOICES.gravatar
        else:
            selected_image_option = PICTURE_CHOICES.none
        self.helper = FormHelper()
        self.helper.layout = Layout(
            HTML("<h1>Personal information</h1>"),
            Div(
                Div("first_name", css_class="col-md-6"),
                Div("last_name", css_class="col-md-6"),
                css_class="row",
            ),
            Div(
                Div("email", css_class="col-md-6"),
                Div("is_minor", css_class="col-md-6 mt-4"),
                css_class="row",
            ),
            Div(
                Div("phone", css_class="col-md-6"),
                Div("gender", css_class="col-md-6"),
                css_class="row",
            ),
            HTML("<h1>Profile picture</h1>"),
            Div(
                # The picture chooser is a hand-written template because it
                # mixes the radio buttons with preview markup.
                HTML(
                    render_to_string(
                        "conference/user_panel/forms/profile_settings_picture.html",
                        context={
                            "selected_picture_option": selected_image_option,
                            "profile_image_url": image_url,
                            # Creating an enum-type accessible in the template
                            "picture_choices": dict(
                                [(x[0], x[0]) for x in PICTURE_CHOICES]
                            ),
                        },
                    )
                ),
                css_class="row",
            ),
            HTML("<h1>Profile information</h1>"),
            Div(Div("tagline", css_class="col-md-12"), css_class="row"),
            Div(
                Div("personal_homepage", css_class="col-md-4"),
                Div("twitter", css_class="col-md-4"),
                Div("location", css_class="col-md-4"),
                css_class="row",
            ),
            Div(
                Div("job_title", css_class="col-md-4"),
                Div("company", css_class="col-md-4"),
                Div("company_homepage", css_class="col-md-4"),
                css_class="row",
            ),
            Div(Div("bio", css_class="col-md-12"), css_class="row"),
            HTML("<h1>Profile page visibility</h1>"),
            HTML(
                "<h5><strong>Speaker profile pages are public by default.</strong> "
                "You still have the option to change your preferences in the coming "
                "years.</h5>"
            ),
            Div(Div("visibility", css_class="col-md-4"), css_class="row"),
            ButtonHolder(
                Submit("update", "Update", css_class="btn btn-lg btn-primary")
            ),
        )

    def clean_email(self):
        # Allow keeping one's own address; reject addresses already used by
        # any other account (case-insensitive).
        value = self.cleaned_data["email"].strip()
        user = self.instance.user
        if value != user.email and User.objects.filter(email__iexact=value).exists():
            raise forms.ValidationError("Email already registered")
        return value

    def clean_twitter(self):
        # Normalize "@handle" and full twitter.com URLs down to the bare handle.
        data = self.cleaned_data.get("twitter", "")
        if data.startswith("@"):
            data = data[1:]
        # Remove http[s]://twitter.com
        if data.startswith(('https://twitter.com', 'http://twitter.com')):
            data = data.split('/')[-1]
        return data

    def clean_image(self):
        # data may be None/empty (no upload) -> AttributeError on .size,
        # which is deliberately ignored.
        data = self.cleaned_data.get("image")
        try:
            if data.size > settings.PROFILE_PICTURE_MAX_SIZE:
                raise forms.ValidationError("Profile picture too large ( > 2.5mb )")
        except AttributeError:
            pass
        return data

    def resolve_image_settings(self, selected_option, image_url, image):
        # Normalize the three picture fields so only the selected source
        # survives; the other two are cleared.
        if selected_option == PICTURE_CHOICES.gravatar:
            image = None
            image_url = ""
            image_gravatar = True
        elif selected_option == PICTURE_CHOICES.url:
            image = None
            image_gravatar = False
        elif selected_option == PICTURE_CHOICES.file:
            image_url = ""
            image_gravatar = False
        else:
            # The default, or when the user selects PICTURE_CHOICES.none
            image = None
            image_url = ""
            image_gravatar = False
        return image_gravatar, image_url, image

    def save(self, commit=True):
        """
        Since this form updates related models, it does not support commit=False.
        """
        profile = super().save(commit=True)
        profile.setBio(self.cleaned_data.get("bio", ""))
        # Resolve image settings.
        image_gravatar, image_url, image = self.resolve_image_settings(
            selected_option=self.cleaned_data["picture_options"],
            image_url=self.cleaned_data.get("image_url"),
            image=self.cleaned_data.get("image"),
        )
        # profile.image is cleared unless the "file" option was selected.
        profile.image = image
        profile.save()
        # Save user fields
        user = profile.user
        user.first_name = self.cleaned_data["first_name"]
        user.last_name = self.cleaned_data["last_name"]
        user.email = self.cleaned_data["email"]
        user.save()
        # Save p3 profile fields
        p3_profile = profile.p3_profile
        p3_profile.tagline = self.cleaned_data["tagline"]
        p3_profile.twitter = self.cleaned_data["twitter"]
        p3_profile.image_gravatar = image_gravatar
        p3_profile.image_url = image_url
        p3_profile.save()
        return profile
def get_tickets_for_current_conference(user):
    """Conference tickets the user holds or bought for the current conference.

    Each ticket is annotated with ``is_buyer`` -- True when the user placed
    the (completed) order the ticket belongs to.
    """
    if not user.is_authenticated:
        return Ticket.objects.none()
    conference = Conference.objects.current()
    assopy_pk = user.assopy_user.pk
    holder_or_buyer = Q(user=user) | Q(orderitem__order__user__pk=assopy_pk)
    tickets = Ticket.objects.filter(
        Q(fare__conference=conference.code)
        & Q(frozen=False)
        & Q(orderitem__order___complete=True)
        & holder_or_buyer
    )
    return tickets.annotate(
        is_buyer=Case(
            When(orderitem__order__user__pk=assopy_pk, then=Value(True)),
            default=Value(False),
            output_field=BooleanField(),
        )
    )
def matrix_token_access(request):
    """Return True when the request carries a valid Matrix embedding token
    (and, when configured, an acceptable Referer header)."""
    if request is None:
        return False
    # Token check
    expected_token = settings.MATRIX_STREAM_EMBEDDING_TOKEN
    if expected_token is None:
        # Token-based embedding is disabled entirely.
        return False
    if request.GET.get('token') != expected_token:
        # Covers both a missing and a wrong token.
        return False
    # Referer check (only enforced when a prefix is configured)
    required_referer = settings.MATRIX_STREAM_EMBEDDING_REFERER
    if required_referer is not None:
        referrer = request.META.get('HTTP_REFERER')
        if referrer is None or not referrer.startswith(required_referer):
            return False
    return True
def get_streams_for_current_conference(user, request=None):
    """ Return the list of currently active streams as dictionaries:
        - title
        - id
        - url

        Also returns a reload_timeout_seconds hint telling the client when
        to re-fetch the stream list.
    """
    if user.is_authenticated:
        # Authenticated user: use tickets
        fare_codes = set(
            get_tickets_for_current_conference(user).values_list("fare__code", flat=True))
    elif matrix_token_access(request):
        # Token-based embedding: use the configured fare codes.
        # NOTE(review): the intersection below requires this setting to be a
        # set (``set & list`` raises TypeError) -- confirm its type.
        fare_codes = settings.MATRIX_STREAM_EMBEDDING_FARES
    else:
        # No fares available
        fare_codes = set()
    # Allow filtering by title
    if request is not None:
        title_filter = request.GET.get('title')
    else:
        title_filter = None
    # Allow filtering by id
    if request is not None:
        id_filter = request.GET.get('id')
    else:
        id_filter = None
    conference = Conference.objects.current()
    now = timezone.now()
    # Stream sets active right now: enabled and inside their (optional)
    # start/end date window.
    stream_sets = list(StreamSet.objects.filter(
        Q(conference=conference) &
        Q(enabled = True) &
        (Q(start_date = None) | Q(start_date__lte = now)) &
        (Q(end_date = None) | Q(end_date__gte = now))
    ))
    streams = []
    if stream_sets:
        reload_date = now + timedelta(hours=12)
    else:
        # No active sets: ask the client to retry almost immediately
        # (only the random jitter added below applies).
        reload_date = now + timedelta(minutes=0)
    for stream_set in stream_sets:
        # stream_set.streams is expected to be a JSON list of dicts with
        # 'title', 'url' and optional 'id'/'fare_codes' keys.
        assert isinstance(stream_set.streams, list)
        for stream in stream_set.streams:
            stream_fare_codes = set(stream.get('fare_codes', ()))
            # A stream is visible when the viewer holds at least one of the
            # fare codes it requires.
            if stream_fare_codes & fare_codes:
                title = stream['title']
                id = stream.get('id', '')
                url = stream['url']
                if title_filter:
                    if title != title_filter:
                        continue
                if id_filter:
                    if id != id_filter:
                        continue
                streams.append({
                    'title': title,
                    'id': id,
                    'url': url,
                })
        # Shrink the reload window so clients re-fetch when the earliest
        # active set expires.
        end_date = stream_set.end_date
        if end_date is not None and end_date < reload_date:
            reload_date = end_date
    data = dict(
        streams=streams,
        reload_timeout_seconds=(
            (reload_date - now).total_seconds() + random.randint(5, 60)),
    )
    return data
def get_invoices_for_current_conference(user):
    """Invoices issued to the user in the current conference's year."""
    current_year = Conference.objects.current().conference_start.year
    return Invoice.objects.filter(
        order__user__user=user,
        emit_date__year=current_year,
    )
def get_proposals_for_current_conference(user):
    """
    This goes through TalkSpeaker module, not Talk.created_by to correctly show
    cases if people are assigned (as co-speakers) to proposals/talks created by
    other people
    """
    try:
        speaker = user.speaker
    except Speaker.DoesNotExist:
        # Users who never submitted anything have no Speaker record.
        return None
    current_code = Conference.objects.current().code
    return [
        talk_speaker.talk
        for talk_speaker in TalkSpeaker.objects.filter(
            speaker=speaker, talk__conference=current_code
        )
    ]
def get_orders_for_current_conference(user):
    # HACK(artcz) -- because Order doesn't have a link to Conference, we'll
    # just filter by current's conference year
    conference_year = Conference.objects.current().conference_start.year
    return Order.objects.filter(
        created__year=conference_year,
        user=user.assopy_user,
    )
# URL routes for the user panel; reversed under the "user_panel" namespace
# (e.g. redirect("user_panel:dashboard") above).
urlpatterns = [
    re_path(r"^$", user_dashboard, name="dashboard"),
    re_path(r"^manage-ticket/(?P<ticket_id>\d+)/$", manage_ticket, name="manage_ticket"),
    re_path(r"^assign-ticket/(?P<ticket_id>\d+)/$", assign_ticket, name="assign_ticket"),
    re_path(r"^privacy-settings/$", privacy_settings, name="privacy_settings"),
    re_path(r"^profile-settings/$", profile_settings, name="profile_settings"),
    # Password change uses Django's stock auth views with project templates.
    re_path(
        r"^password/change/$",
        auth_views.PasswordChangeView.as_view(
            template_name="conference/user_panel/password_change.html",
            success_url=reverse_lazy("user_panel:password_change_done"),
        ),
        name="password_change",
    ),
    re_path(
        r"^password/change/done/$",
        auth_views.PasswordChangeDoneView.as_view(
            template_name="conference/user_panel/password_change_done.html"
        ),
        name="password_change_done",
    ),
]
| |
# Source (DCGAN): https://gist.github.com/f0k/738fa2eedd9666b78404ed1751336f56
# Source (WGAN): https://gist.github.com/f0k/f3190ebba6c53887d598d03119ca2066
# Source (LSGAN): https://gist.github.com/f0k/9b0bb51040719eeafec7eba473a9e79b
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import lasagne
from load_mnist_data import load_mnist_data
from utils import iterate_minibatches
############################
# Build Generator Function #
############################
def build_generator(input_var=None):
    """Build the DCGAN-style MNIST generator: 100-d noise -> (None, 1, 28, 28)."""
    from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
    try:
        from lasagne.layers import TransposedConv2DLayer as Deconv2DLayer
    except ImportError:
        raise ImportError("Your Lasagne is too old. Try the bleeding-edge "
                          "version: http://lasagne.readthedocs.io/en/latest/"
                          "user/installation.html#bleeding-edge-version")
    try:
        # Prefer the cuDNN batch-norm implementation when available.
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import sigmoid

    net = InputLayer(shape=(None, 100), input_var=input_var)
    # Fully-connected stem.
    net = batch_norm(DenseLayer(net, 1024))
    # Project to 128 feature maps of 7x7 and reshape to an image-like tensor.
    net = batch_norm(DenseLayer(net, 128 * 7 * 7))
    net = ReshapeLayer(net, ([0], 128, 7, 7))
    # Two fractional-stride (transposed) convolutions: 7x7 -> 14x14 -> 28x28.
    net = batch_norm(Deconv2DLayer(net, 64, 5, stride=2, crop='same',
                                   output_size=14))
    net = Deconv2DLayer(net, 1, 5, stride=2, crop='same', output_size=28,
                        nonlinearity=sigmoid)
    print("Generator output:", net.output_shape)
    return net
#########################
# Build Critic Function #
#########################
def build_critic(input_var=None, model_name='wgan'):
    """Build the critic/discriminator for MNIST GANs.

    A shared convolutional trunk; the output layer depends on the GAN flavor:
    - 'dcgan': sigmoid probability
    - 'wgan':  unbounded linear score without bias
    - 'lsgan': unbounded linear score

    Raises ValueError for an unknown model_name (previously an unknown name
    silently returned the network without any output layer).
    """
    from lasagne.layers import (InputLayer, Conv2DLayer, ReshapeLayer,
                                DenseLayer)
    try:
        # Prefer the cuDNN batch-norm implementation when available.
        from lasagne.layers.dnn import batch_norm_dnn as batch_norm
    except ImportError:
        from lasagne.layers import batch_norm
    from lasagne.nonlinearities import LeakyRectify, sigmoid
    lrelu = LeakyRectify(0.2)
    # input: (None, 1, 28, 28)
    layer = InputLayer(shape=(None, 1, 28, 28), input_var=input_var)
    # two strided convolutions: 28x28 -> 14x14 -> 7x7
    layer = batch_norm(Conv2DLayer(layer, 64, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    layer = batch_norm(Conv2DLayer(layer, 128, 5, stride=2, pad='same',
                                   nonlinearity=lrelu))
    # fully-connected layer
    layer = batch_norm(DenseLayer(layer, 1024, nonlinearity=lrelu))
    # output layer, per GAN flavor
    if model_name == 'dcgan':
        layer = DenseLayer(layer, 1, nonlinearity=sigmoid)
    elif model_name == 'wgan':
        layer = DenseLayer(layer, 1, nonlinearity=None, b=None)
    elif model_name == 'lsgan':
        layer = DenseLayer(layer, 1, nonlinearity=None)
    else:
        raise ValueError("Unknown model_name: %r" % (model_name,))
    print("critic output:", layer.output_shape)
    return layer
def run_gan(
        num_epochs=1000,
        epochsize=100,
        batchsize=64,
        initial_eta=5e-5,
        clip=0.01,
        model_name='wgan',
        optimizer_name = 'rmsprop'
):
    """Train a GAN on MNIST and save generated samples plus model weights.

    num_epochs     -- number of training epochs
    epochsize      -- generator/critic update pairs per epoch
    batchsize      -- minibatch size
    initial_eta    -- starting learning rate (decayed after half the epochs)
    clip           -- WGAN weight-clipping bound; falsy disables clipping
    model_name     -- 'dcgan', 'wgan' or 'lsgan' (selects the loss)
    optimizer_name -- 'rmsprop' or 'adam'
                      NOTE(review): any other value leaves the update
                      dictionaries undefined and fails with NameError below.
    """
    #############
    # Load Data #
    #############
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_mnist_data()
    input_shape = X_train[0].shape
    shape = (None, input_shape[0], input_shape[1], input_shape[2])
    ############################################
    # allocate symbolic variables for the data #
    ############################################
    noise_var = T.matrix('noise')
    input_var = T.tensor4('inputs')
    ####################################################
    # BUILD MODEL (The model is a function in Lasagne) #
    ####################################################
    print('... building the model')
    generator = build_generator(noise_var)
    critic = build_critic(input_var, model_name=model_name)
    #####################
    # Training Function #
    #####################
    # Create expression for passing real data through the critic
    real_out = lasagne.layers.get_output(critic)
    # Create expression for passing fake data through the critic
    fake_out = lasagne.layers.get_output(critic,
                                         lasagne.layers.get_output(generator))
    # Create loss expressions
    if model_name == 'dcgan':
        generator_loss = lasagne.objectives.binary_crossentropy(fake_out, 1).mean()
        critic_loss = (lasagne.objectives.binary_crossentropy(real_out, 1)
                       + lasagne.objectives.binary_crossentropy(fake_out, 0)).mean()
    elif model_name == 'wgan':
        generator_loss = -fake_out.mean()
        critic_loss = fake_out.mean() - real_out.mean()
    elif model_name == 'lsgan':
        # a, b, c = -1, 1, 0  # Equation (8) in the paper
        a, b, c = 0, 1, 1  # Equation (9) in the paper
        generator_loss = lasagne.objectives.squared_error(fake_out, c).mean()
        critic_loss = (lasagne.objectives.squared_error(real_out, b).mean() +
                       lasagne.objectives.squared_error(fake_out, a).mean())
    # Create update expressions for training
    generator_params = lasagne.layers.get_all_params(generator, trainable=True)
    critic_params = lasagne.layers.get_all_params(critic, trainable=True)
    # Learning rate lives in a shared variable so it can be decayed later.
    eta = theano.shared(lasagne.utils.floatX(initial_eta))
    if optimizer_name == 'rmsprop':
        generator_updates = lasagne.updates.rmsprop(
            generator_loss, generator_params, learning_rate=eta)
        critic_updates = lasagne.updates.rmsprop(
            critic_loss, critic_params, learning_rate=eta)
    elif optimizer_name == 'adam':
        generator_updates = lasagne.updates.adam(
            generator_loss, generator_params, learning_rate=eta, beta1=0.5)
        critic_updates = lasagne.updates.adam(
            critic_loss, critic_params, learning_rate=eta, beta1=0.5)
    if clip:
        # Clip critic parameters in a limited range around zero (except biases)
        for param in lasagne.layers.get_all_params(critic, trainable=True,
                                                   regularizable=True):
            critic_updates[param] = T.clip(critic_updates[param], -clip, clip)
    # Instantiate a symbolic noise generator to use for training
    srng = RandomStreams(seed=np.random.randint(2147462579, size=6))
    noise = srng.uniform((batchsize, 100))
    # Compile functions performing a training step on a mini-batch (according
    # to the updates dictionary) and returning the corresponding score:
    generator_train_fn = theano.function([], generator_loss,
                                         givens={noise_var: noise},
                                         updates=generator_updates)
    critic_train_fn = theano.function([input_var], critic_loss,
                                      givens={noise_var: noise},
                                      updates=critic_updates)
    # Compile another function generating some data
    gen_fn = theano.function([noise_var],
                             lasagne.layers.get_output(generator,
                                                       deterministic=True))
    ###############
    # TRAIN MODEL #
    ###############
    print('... Starting training')
    # We create an infinite supply of batches (as an iterable generator):
    batches = iterate_minibatches(X_train, y_train, batchsize, shuffle=True)
    # We iterate over epochs:
    # NOTE(review): this rebinds the name that held the generator updates
    # dict; harmless here since the functions are already compiled, but the
    # counter itself is never used afterwards.
    generator_updates = 0
    for epoch in range(num_epochs):
        start_time = time.time()
        # In each epoch, we do `epochsize` generator and critic updates.
        critic_losses = []
        generator_losses = []
        for _ in range(epochsize):
            inputs, targets = next(batches)
            critic_losses.append(critic_train_fn(inputs))
            generator_losses.append(generator_train_fn())
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  generator loss: {}".format(np.mean(generator_losses)))
        print("  critic loss:    {}".format(np.mean(critic_losses)))
        # And finally, we plot some generated data
        # NOTE(review): the sample image is named 'lsgan_mnist_samples.png'
        # regardless of model_name.
        samples = gen_fn(lasagne.utils.floatX(np.random.rand(42, 100)))
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            pass
        else:
            # 42 samples arranged as a 6x7 grid of 28x28 digits.
            plt.imsave('lsgan_mnist_samples.png',
                       (samples.reshape(6, 7, 28, 28)
                        .transpose(0, 2, 1, 3)
                        .reshape(6*28, 7*28)),
                       cmap='gray')
        # After half the epochs, we start decaying the learn rate towards zero
        if epoch >= num_epochs // 2:
            progress = float(epoch) / num_epochs
            eta.set_value(lasagne.utils.floatX(initial_eta*2*(1 - progress)))
    ###################
    # SAVE/LOAD MODEL #
    ###################
    # Optionally, you could now dump the network weights to a file like this:
    # NOTE(review): filenames also hard-code 'lsgan' regardless of model_name.
    np.savez('lsgan_mnist_gen.npz', *lasagne.layers.get_all_param_values(generator))
    np.savez('lsgan_mnist_crit.npz', *lasagne.layers.get_all_param_values(critic))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    # lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
    # Choose a model (each has slight modifications from its paper):
    #   dcgan - Deep Convolutional Generative Adversarial Network
    #   wgan  - Wasserstein Generative Adversarial Network
    #   lsgan - Least Squares Generative Adversarial Network
    # Choose an optimization method: 'rmsprop' or 'adam'.
    # (The original used a bare string statement as a comment; converted to
    # real comments and fixed the typos.)
    run_gan(model_name='lsgan', optimizer_name='adam')
| |
"""
Receive signals from a keyboard and use it as a remote control.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/keyboard_remote/
"""
# pylint: disable=import-error
import threading
import logging
import os
import time
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
# Third-party requirement installed by Home Assistant for this component.
REQUIREMENTS = ['evdev==0.6.1']
_LOGGER = logging.getLogger(__name__)
# Configuration keys for one monitored input device.
DEVICE_DESCRIPTOR = 'device_descriptor'
DEVICE_ID_GROUP = 'Device description'
DEVICE_NAME = 'device_name'
DOMAIN = 'keyboard_remote'
ICON = 'mdi:remote'
KEY_CODE = 'key_code'
# evdev event values for the three key states (release / press / autorepeat).
KEY_VALUE = {'key_up': 0, 'key_down': 1, 'key_hold': 2}
# Event types fired on the Home Assistant bus.
KEYBOARD_REMOTE_COMMAND_RECEIVED = 'keyboard_remote_command_received'
KEYBOARD_REMOTE_CONNECTED = 'keyboard_remote_connected'
KEYBOARD_REMOTE_DISCONNECTED = 'keyboard_remote_disconnected'
TYPE = 'type'
# Each list entry identifies a device by at most one of descriptor or name
# (vol.Exclusive makes them mutually exclusive within DEVICE_ID_GROUP) and
# may choose which key state triggers the bus event (default: key_up).
CONFIG_SCHEMA = vol.Schema({
    DOMAIN:
        vol.All(cv.ensure_list, [vol.Schema({
            vol.Exclusive(DEVICE_DESCRIPTOR, DEVICE_ID_GROUP): cv.string,
            vol.Exclusive(DEVICE_NAME, DEVICE_ID_GROUP): cv.string,
            vol.Optional(TYPE, default='key_up'):
                vol.All(cv.string, vol.Any('key_up', 'key_down', 'key_hold'))
        })])
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the keyboard_remote."""
    remote = KeyboardRemote(hass, config.get(DOMAIN))

    def _on_start(_event):
        remote.run()

    def _on_stop(_event):
        remote.stop()

    # Start listening once Home Assistant is up, stop on shutdown.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _on_start)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _on_stop)
    return True
class KeyboardRemoteThread(threading.Thread):
    """This interfaces with the inputdevice using evdev."""

    def __init__(self, hass, device_name, device_descriptor, key_value):
        """Construct a thread listening for events on one device."""
        self.hass = hass
        self.device_name = device_name
        self.device_descriptor = device_descriptor
        self.key_value = key_value

        # Prefer the descriptor for identifying the device in log messages;
        # fall back to the human-readable name.
        if self.device_descriptor:
            self.device_id = self.device_descriptor
        else:
            self.device_id = self.device_name

        self.dev = self._get_keyboard_device()
        if self.dev is not None:
            _LOGGER.debug("Keyboard connected, %s", self.device_id)
        else:
            _LOGGER.debug(
                "Keyboard not connected, %s. "
                "Check /dev/input/event* permissions", self.device_id)

            # Help the user diagnose a bad config by listing what exists.
            id_folder = '/dev/input/by-id/'
            if os.path.isdir(id_folder):
                from evdev import InputDevice, list_devices
                device_names = [InputDevice(file_name).name
                                for file_name in list_devices()]
                _LOGGER.debug(
                    "Possible device names are: %s. "
                    "Possible device descriptors are %s: %s",
                    device_names, id_folder, os.listdir(id_folder))

        threading.Thread.__init__(self)
        self.stopped = threading.Event()
        self.hass = hass

    def _get_keyboard_device(self):
        """Get the keyboard device, or None if it cannot be found/opened."""
        from evdev import InputDevice, list_devices
        if self.device_name:
            devices = [InputDevice(file_name) for file_name in list_devices()]
            for device in devices:
                if self.device_name == device.name:
                    return device
        elif self.device_descriptor:
            try:
                device = InputDevice(self.device_descriptor)
            except OSError:
                pass
            else:
                return device
        return None

    def run(self):
        """Run the loop of the KeyboardRemote."""
        from evdev import categorize, ecodes

        if self.dev is not None:
            # grab() gives this process exclusive access to the device.
            self.dev.grab()
            _LOGGER.debug("Interface started for %s", self.dev)

        # Use is_set(); isSet() is the deprecated camelCase alias.
        while not self.stopped.is_set():
            # Sleeps to ease load on processor
            time.sleep(.05)

            # Try to (re)connect if the device dropped out earlier.
            if self.dev is None:
                self.dev = self._get_keyboard_device()
                if self.dev is not None:
                    self.dev.grab()
                    self.hass.bus.fire(
                        KEYBOARD_REMOTE_CONNECTED,
                        {
                            DEVICE_DESCRIPTOR: self.device_descriptor,
                            DEVICE_NAME: self.device_name
                        }
                    )
                    _LOGGER.debug("Keyboard re-connected, %s", self.device_id)
                else:
                    continue

            try:
                event = self.dev.read_one()
            except IOError:  # Keyboard Disconnected
                self.dev = None
                self.hass.bus.fire(
                    KEYBOARD_REMOTE_DISCONNECTED,
                    {
                        DEVICE_DESCRIPTOR: self.device_descriptor,
                        DEVICE_NAME: self.device_name
                    }
                )
                _LOGGER.debug("Keyboard disconnected, %s", self.device_id)
                continue

            if not event:
                continue

            # Compare with `==`, not `is`: identity comparison on ints only
            # works by accident of CPython's small-integer caching.
            if event.type == ecodes.EV_KEY and event.value == self.key_value:
                _LOGGER.debug(categorize(event))
                self.hass.bus.fire(
                    KEYBOARD_REMOTE_COMMAND_RECEIVED,
                    {
                        KEY_CODE: event.code,
                        DEVICE_DESCRIPTOR: self.device_descriptor,
                        DEVICE_NAME: self.device_name
                    }
                )
class KeyboardRemote:
    """Sets up one thread per device."""

    def __init__(self, hass, config):
        """Construct a KeyboardRemote interface object."""
        # One listener thread per config entry that names a device either
        # by descriptor or by name; entries with neither are skipped.
        self.threads = [
            KeyboardRemoteThread(hass,
                                 entry.get(DEVICE_NAME),
                                 entry.get(DEVICE_DESCRIPTOR),
                                 KEY_VALUE.get(entry.get(TYPE, 'key_up')))
            for entry in config
            if entry.get(DEVICE_DESCRIPTOR) is not None
            or entry.get(DEVICE_NAME) is not None
        ]

    def run(self):
        """Run all event listener threads."""
        for listener in self.threads:
            listener.start()

    def stop(self):
        """Stop all event listener threads."""
        for listener in self.threads:
            listener.stopped.set()
| |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/renderPDF.py
# renderPDF - draws Drawings onto a canvas
__version__=''' $Id: renderPDF.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""Render Drawing objects within others PDFs or standalone
Usage::
import renderpdf
renderpdf.draw(drawing, canvas, x, y)
Execute the script to see some test drawings.
changed
"""
from reportlab.graphics.shapes import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.lib.utils import getStringIO
from reportlab import rl_config
from renderbase import Renderer, StateTracker, getStateDelta, renderScaledDrawing
# the main entry point for users...
def draw(drawing, canvas, x, y, showBoundary=rl_config._unset_):
    """Draw `drawing` onto `canvas` at position (x, y)."""
    renderer = _PDFRenderer()
    renderer.draw(renderScaledDrawing(drawing), canvas, x, y,
                  showBoundary=showBoundary)
class _PDFRenderer(Renderer):
    """This draws onto a PDF document. It needs to be a class
    rather than a function, as some PDF-specific state tracking is
    needed outside of the state info in the SVG model."""

    def __init__(self):
        # _stroke/_fill are 0/1 flags mirroring whether the current graphics
        # state has a stroke/fill colour; pdfgen wants them on each draw call.
        self._stroke = 0
        self._fill = 0
        self._tracker = StateTracker()

    def drawNode(self, node):
        """This is the recursive method called for each node
        in the tree"""
        # Clip paths must not be bracketed by save/restoreState, or the
        # clipping would be undone immediately.
        if not (isinstance(node, Path) and node.isClipPath):
            self._canvas.saveState()
        #apply state changes
        deltas = getStateDelta(node)
        self._tracker.push(deltas)
        self.applyStateChanges(deltas, {})
        #draw the object, or recurse
        self.drawNodeDispatcher(node)
        self._tracker.pop()
        if not (isinstance(node, Path) and node.isClipPath):
            self._canvas.restoreState()

    def drawRect(self, rect):
        if rect.rx == rect.ry == 0:
            #plain old rectangle
            self._canvas.rect(
                rect.x, rect.y,
                rect.width, rect.height,
                stroke=self._stroke,
                fill=self._fill
            )
        else:
            #cheat and assume ry = rx; better to generalize
            #pdfgen roundRect function.  TODO
            self._canvas.roundRect(
                rect.x, rect.y,
                rect.width, rect.height, rect.rx,
                fill=self._fill,
                stroke=self._stroke
            )

    def drawImage(self, image):
        path = image.path
        # currently not implemented in other renderers
        if path and (hasattr(path, 'mode') or os.path.exists(image.path)):
            self._canvas.drawInlineImage(
                path,
                image.x, image.y,
                image.width, image.height
            )

    def drawLine(self, line):
        if self._stroke:
            self._canvas.line(line.x1, line.y1, line.x2, line.y2)

    def drawCircle(self, circle):
        self._canvas.circle(
            circle.cx, circle.cy, circle.r,
            fill=self._fill,
            stroke=self._stroke
        )

    def drawPolyLine(self, polyline):
        if self._stroke:
            assert len(polyline.points) >= 2, 'Polyline must have 2 or more points'
            # points is a flat [x0, y0, x1, y1, ...] list
            head, tail = polyline.points[0:2], polyline.points[2:]
            path = self._canvas.beginPath()
            path.moveTo(head[0], head[1])
            for i in range(0, len(tail), 2):
                path.lineTo(tail[i], tail[i + 1])
            self._canvas.drawPath(path)

    def drawWedge(self, wedge):
        centerx, centery, radius, startangledegrees, endangledegrees = \
            wedge.centerx, wedge.centery, wedge.radius, wedge.startangledegrees, wedge.endangledegrees
        yradius, radius1, yradius1 = wedge._xtraRadii()
        if yradius is None: yradius = radius
        angle = endangledegrees - startangledegrees
        path = self._canvas.beginPath()
        if (radius1 == 0 or radius1 is None) and (yradius1 == 0 or yradius1 is None):
            # simple pie slice: line from the centre, then the outer arc
            path.moveTo(centerx, centery)
            path.arcTo(centerx - radius, centery - yradius, centerx + radius, centery + yradius,
                       startangledegrees, angle)
        else:
            # annular wedge: outer arc one way, inner arc back
            path.arc(centerx - radius, centery - yradius, centerx + radius, centery + yradius,
                     startangledegrees, angle)
            path.arcTo(centerx - radius1, centery - yradius1, centerx + radius1, centery + yradius1,
                       endangledegrees, -angle)
        path.close()
        self._canvas.drawPath(path,
                              fill=self._fill,
                              stroke=self._stroke)

    def drawEllipse(self, ellipse):
        #need to convert to pdfgen's bounding box representation
        x1 = ellipse.cx - ellipse.rx
        x2 = ellipse.cx + ellipse.rx
        y1 = ellipse.cy - ellipse.ry
        y2 = ellipse.cy + ellipse.ry
        self._canvas.ellipse(x1, y1, x2, y2, fill=self._fill, stroke=self._stroke)

    def drawPolygon(self, polygon):
        assert len(polygon.points) >= 2, 'Polyline must have 2 or more points'
        head, tail = polygon.points[0:2], polygon.points[2:]
        path = self._canvas.beginPath()
        path.moveTo(head[0], head[1])
        for i in range(0, len(tail), 2):
            path.lineTo(tail[i], tail[i + 1])
        path.close()
        self._canvas.drawPath(
            path,
            stroke=self._stroke,
            fill=self._fill
        )

    def drawString(self, stringObj):
        if self._fill:
            S = self._tracker.getState()
            text_anchor, x, y, text, enc = S['textAnchor'], stringObj.x, stringObj.y, stringObj.text, stringObj.encoding
            if text_anchor not in ['start', 'inherited']:
                # shift x according to the anchor and the rendered text width
                font, font_size = S['fontName'], S['fontSize']
                textLen = stringWidth(text, font, font_size, enc)
                if text_anchor == 'end':
                    x -= textLen
                elif text_anchor == 'middle':
                    x -= textLen * 0.5
                elif text_anchor == 'numeric':
                    x -= numericXShift(text_anchor, text, textLen, font, font_size, enc)
                else:
                    # call-form raise works on both Python 2 and 3; the old
                    # `raise ValueError, msg` form is Python-2-only syntax
                    raise ValueError('bad value for textAnchor ' + str(text_anchor))
            t = self._canvas.beginText(x, y)
            t.textLine(text)
            self._canvas.drawText(t)

    def drawPath(self, path):
        from reportlab.graphics.shapes import _renderPath
        pdfPath = self._canvas.beginPath()
        drawFuncs = (pdfPath.moveTo, pdfPath.lineTo, pdfPath.curveTo, pdfPath.close)
        isClosed = _renderPath(path, drawFuncs)
        if isClosed:
            fill = self._fill
        else:
            # an open path cannot be filled
            fill = 0
        if path.isClipPath:
            self._canvas.clipPath(pdfPath, fill=fill, stroke=self._stroke)
        else:
            self._canvas.drawPath(pdfPath,
                                  fill=fill,
                                  stroke=self._stroke)

    def setStrokeColor(self, c):
        self._canvas.setStrokeColor(c)

    def setFillColor(self, c):
        self._canvas.setFillColor(c)

    def applyStateChanges(self, delta, newState):
        """This takes a set of states, and outputs the PDF operators
        needed to set those properties"""
        for key, value in delta.items():
            if key == 'transform':
                self._canvas.transform(value[0], value[1], value[2],
                                       value[3], value[4], value[5])
            elif key == 'strokeColor':
                #this has different semantics in PDF to SVG;
                #we always have a color, and either do or do
                #not apply it; in SVG one can have a 'None' color
                if value is None:
                    self._stroke = 0
                else:
                    self._stroke = 1
                    self.setStrokeColor(value)
            elif key == 'strokeWidth':
                self._canvas.setLineWidth(value)
            elif key == 'strokeLineCap':  #0,1,2
                self._canvas.setLineCap(value)
            elif key == 'strokeLineJoin':
                self._canvas.setLineJoin(value)
            elif key == 'strokeDashArray':
                if value:
                    # value may be (phase, [dashes]) or just [dashes]
                    if isinstance(value, (list, tuple)) and len(value) == 2 and isinstance(value[1], (tuple, list)):
                        phase = value[0]
                        value = value[1]
                    else:
                        phase = 0
                    self._canvas.setDash(value, phase)
                else:
                    self._canvas.setDash()
            elif key == 'fillColor':
                #this has different semantics in PDF to SVG;
                #we always have a color, and either do or do
                #not apply it; in SVG one can have a 'None' color
                if value is None:
                    self._fill = 0
                else:
                    self._fill = 1
                    self.setFillColor(value)
            elif key in ['fontSize', 'fontName']:
                # both need setting together in PDF
                # one or both might be in the deltas,
                # so need to get whichever is missing
                fontname = delta.get('fontName', self._canvas._fontname)
                fontsize = delta.get('fontSize', self._canvas._fontsize)
                self._canvas.setFont(fontname, fontsize)
            elif key == 'fillOpacity':
                if value is not None:
                    self._canvas.setFillAlpha(value)
            elif key == 'strokeOpacity':
                if value is not None:
                    self._canvas.setStrokeAlpha(value)
            elif key == 'fillOverprint':
                self._canvas.setFillOverprint(value)
            elif key == 'strokeOverprint':
                self._canvas.setStrokeOverprint(value)
            elif key == 'overprintMask':
                self._canvas.setOverprintMask(value)
from reportlab.platypus import Flowable
class GraphicsFlowable(Flowable):
    """Flowable wrapper around a Pingo drawing"""

    def __init__(self, drawing):
        # expose the drawing's size so platypus can lay it out
        self.drawing = drawing
        self.width = drawing.width
        self.height = drawing.height

    def draw(self):
        draw(self.drawing, self.canv, 0, 0)
def drawToFile(d, fn, msg="", showBoundary=rl_config._unset_, autoSize=1):
    """Makes a one-page PDF with just the drawing.

    If autoSize=1, the PDF will be the same size as
    the drawing; if 0, it will place the drawing on
    an A4 page with a title above it - possibly overflowing
    if too big."""
    d = renderScaledDrawing(d)
    c = Canvas(fn)
    if msg:
        c.setFont(rl_config.defaultGraphicsFontName, 36)
        c.drawString(80, 750, msg)
        c.setTitle(msg)
    if autoSize:
        # page exactly fits the drawing
        c.setPageSize((d.width, d.height))
        draw(d, c, 0, 0, showBoundary=showBoundary)
    else:
        # show with a title on a default-size page
        c.setFont(rl_config.defaultGraphicsFontName, 12)
        y = 740
        y = y - d.height
        draw(d, c, 80, y, showBoundary=showBoundary)
    c.showPage()
    c.save()
    if sys.platform == 'mac' and not hasattr(fn, "write"):
        # Set creator/type so classic MacOS shows a PDF icon.  This is
        # best-effort only (macfs may not exist), so failures are ignored;
        # `except Exception` (not bare except) still lets SystemExit and
        # KeyboardInterrupt through.
        try:
            import macfs, macostools
            macfs.FSSpec(fn).SetCreatorType("CARO", "PDF ")
            macostools.touched(fn)
        except Exception:
            pass
def drawToString(d, msg="", showBoundary=rl_config._unset_, autoSize=1):
    "Returns a PDF as a string in memory, without touching the disk"
    buf = getStringIO()
    drawToFile(d, buf, msg=msg, showBoundary=showBoundary, autoSize=autoSize)
    return buf.getvalue()
#########################################################
#
# test code. First, define a bunch of drawings.
# Routine to draw them comes at the end.
#
#########################################################
def test():
    """Render every drawing from the testshapes module into renderPDF.pdf."""
    from reportlab.graphics.shapes import _baseGFontName, _baseGFontNameBI
    c = Canvas('renderPDF.pdf')
    c.setFont(_baseGFontName, 36)
    c.drawString(80, 750, 'Graphics Test')
    # grab all drawings from the test module together with their doc strings
    from reportlab.graphics import testshapes
    drawings = []
    for funcname in dir(testshapes):
        if funcname[0:10] == 'getDrawing':
            # getattr is safer and clearer than eval'ing a built-up string
            func = getattr(testshapes, funcname)
            drawings.append((func(), func.__doc__))
    #print in a loop, with their doc strings
    c.setFont(_baseGFontName, 12)
    y = 740
    i = 1
    for (drawing, docstring) in drawings:
        assert (docstring is not None), "Drawing %d has no docstring!" % i
        if y < 300:  #allows 5-6 lines of text
            c.showPage()
            y = 740
        # draw a title
        y = y - 30
        c.setFont(_baseGFontNameBI, 12)
        c.drawString(80, y, 'Drawing %d' % i)
        c.setFont(_baseGFontName, 12)
        y = y - 14
        textObj = c.beginText(80, y)
        textObj.textLines(docstring)
        c.drawText(textObj)
        y = textObj.getY()
        y = y - drawing.height
        draw(drawing, c, 80, y)
        i = i + 1
    if y != 740:
        c.showPage()
    c.save()
    # print() call form works on both Python 2 and 3
    print('saved renderPDF.pdf')
##def testFlowable():
## """Makes a platypus document"""
## from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
## from reportlab.lib.styles import getSampleStyleSheet
## styles = getSampleStyleSheet()
## styNormal = styles['Normal']
##
## doc = SimpleDocTemplate('test_flowable.pdf')
## story = []
##    story.append(Paragraph("This shows that a drawing can work as a flowable", styNormal))
##
## import testdrawings
## drawings = []
##
## for funcname in dir(testdrawings):
## if funcname[0:10] == 'getDrawing':
## drawing = eval('testdrawings.' + funcname + '()') #execute it
## docstring = eval('testdrawings.' + funcname + '.__doc__')
## story.append(Paragraph(docstring, styNormal))
## story.append(Spacer(18,18))
## story.append(drawing)
## story.append(Spacer(36,36))
##
## doc.build(story)
## print 'saves test_flowable.pdf'
if __name__=='__main__':
    # Run the smoke test; writes renderPDF.pdf to the current directory.
    test()
    #testFlowable()
| |
"""Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import dirname, join, exists, isdir
import logging
from distutils.version import LooseVersion
import numpy as np
import joblib
from joblib import Memory
from .base import get_data_home, _fetch_remote, RemoteFileMetadata
from ..utils import Bunch
logger = logging.getLogger(__name__)
# The original data can be found in:
# http://vis-www.cs.umass.edu/lfw/lfw.tgz
# Raw (unaligned) image archive; the checksum guards against corrupted or
# tampered downloads (64 hex chars — presumably SHA-256, as verified by
# _fetch_remote; confirm against its implementation).
ARCHIVE = RemoteFileMetadata(
    filename='lfw.tgz',
    url='https://ndownloader.figshare.com/files/5976018',
    checksum=('055f7d9c632d7370e6fb4afc7468d40f'
              '970c34a80d4c6f50ffec63f5a8d536c0'))
# "Funneled" variant: images pre-aligned with the funneling method.
FUNNELED_ARCHIVE = RemoteFileMetadata(
    filename='lfw-funneled.tgz',
    url='https://ndownloader.figshare.com/files/5976015',
    checksum=('b47c8422c8cded889dc5a13418c4bc2a'
              'bbda121092b3533a83306f90d900100a'))
# Pair index files for the verification task: dev train/test splits and the
# official 10-fold evaluation set.
TARGETS = (
    RemoteFileMetadata(
        filename='pairsDevTrain.txt',
        url='https://ndownloader.figshare.com/files/5976012',
        checksum=('1d454dada7dfeca0e7eab6f65dc4e97a'
                  '6312d44cf142207be28d688be92aabfa')),
    RemoteFileMetadata(
        filename='pairsDevTest.txt',
        url='https://ndownloader.figshare.com/files/5976009',
        checksum=('7cb06600ea8b2814ac26e946201cdb30'
                  '4296262aad67d046a16a7ec85d0ff87c')),
    RemoteFileMetadata(
        filename='pairs.txt',
        url='https://ndownloader.figshare.com/files/5976006',
        checksum=('ea42330c62c92989f9d7c03237ed5d59'
                  '1365e89b3e649747777b70e692dc1592')),
)
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def _check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data"""
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if not exists(lfw_home):
        makedirs(lfw_home)

    # Fetch the pair metadata files first; they are small.
    for target in TARGETS:
        target_filepath = join(lfw_home, target.filename)
        if not exists(target_filepath):
            if download_if_missing:
                logger.info("Downloading LFW metadata: %s", target.url)
                _fetch_remote(target, dirname=lfw_home)
            else:
                raise IOError("%s is missing" % target_filepath)

    if funneled:
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive = FUNNELED_ARCHIVE
    else:
        data_folder_path = join(lfw_home, "lfw")
        archive = ARCHIVE

    if not exists(data_folder_path):
        archive_path = join(lfw_home, archive.filename)
        if not exists(archive_path):
            if download_if_missing:
                logger.info("Downloading LFW data (~200MB): %s",
                            archive.url)
                _fetch_remote(archive, dirname=lfw_home)
            else:
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.debug("Decompressing the data archive to %s", data_folder_path)
        # Context manager closes the archive even if extraction fails;
        # the original leaked the open TarFile handle.
        with tarfile.open(archive_path, "r:gz") as archive_fp:
            archive_fp.extractall(path=lfw_home)
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images"""
    # import PIL only when needed
    from ..externals._pilutil import imread, imresize

    # compute the portion of the images to load to respect the slice_
    # parameter given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        # fill in any missing (None) slice component from the default
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.debug("Loading face #%05d / %05d", i + 1, n_faces)

        # Checks if jpeg reading worked. Refer to issue #3594 for more
        # details.
        img = imread(file_path)
        # Use `==` rather than `is`: identity comparison with an int literal
        # is fragile (relies on CPython interning) and raises a
        # SyntaxWarning on recent Python versions.
        if img.ndim == 0:
            raise RuntimeError("Failed to read the image file %s, "
                               "Please make sure that libjpeg is installed"
                               % file_path)

        face = np.asarray(img[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)

        faces[i, ...] = face

    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True, return_X_y=False):
    """Load the Labeled Faces in the Wild (LFW) people dataset \
(classification).

    Download it if necessary.

    =================   =======================
    Classes                                5749
    Samples total                         13233
    Dimensionality                         5828
    Features            real, between 0 and 255
    =================   =======================

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize each face picture.

    min_faces_per_person : int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : boolean, default=False.
        If True, returns ``(dataset.data, dataset.target)`` instead of a Bunch
        object. See below for more information about the `dataset.data` and
        `dataset.target` object.

        .. versionadded:: 0.20

    Returns
    -------
    dataset : dict-like object with the following attributes:

    dataset.data : numpy array of shape (13233, 2914)
        Each row corresponds to a ravelled face image of original size 62 x 47
        pixels. Changing the ``slice_`` or resize parameters will change the
        shape of the output.

    dataset.images : numpy array of shape (13233, 62, 47)
        Each row is a face image corresponding to one of the 5749 people in
        the dataset. Changing the ``slice_`` or resize parameters will change
        the shape of the output.

    dataset.target : numpy array of shape (13233,)
        Labels associated to each face image. Those labels range from 0-5748
        and correspond to the person IDs.

    dataset.DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.

    (data, target) : tuple if ``return_X_y`` is True

        .. versionadded:: 0.20
    """
    lfw_home, data_folder_path = _check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.debug('Loading LFW people faces from %s', lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    if LooseVersion(joblib.__version__) < LooseVersion('0.12'):
        # Deal with change of API in joblib
        memory = Memory(cachedir=lfw_home, compress=6, verbose=0)
    else:
        memory = Memory(location=lfw_home, compress=6, verbose=0)
    load_func = memory.cache(_fetch_lfw_people)

    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    X = faces.reshape(len(faces), -1)

    with open(join(dirname(__file__), 'descr', 'lfw.rst')) as rst_file:
        fdescr = rst_file.read()

    if return_X_y:
        return X, target

    # pack the results as a Bunch instance
    return Bunch(data=X, images=faces,
                 target=target, target_names=target_names,
                 DESCR=fdescr)
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.decode().strip().split('\t') for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # Use the builtin int: `np.int` is a deprecated alias that was removed in
    # NumPy 1.24, so the original dtype=np.int breaks on recent NumPy.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # same person: (name, img_index_a, img_index_b)
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # different persons: (name_a, img_index_a, name_b, img_index_b)
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # name arrived as bytes; decode before joining
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)
    # reshape flat (2 * n_pairs, ...) image stack into (n_pairs, 2, ...)
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Load the Labeled Faces in the Wild (LFW) pairs dataset (classification).

    Download it if necessary.

    =================   =======================
    Classes                                5749
    Samples total                         13233
    Dimensionality                         5828
    Features            real, between 0 and 255
    =================   =======================

    In the official `README.txt`_ this task is described as the
    "Restricted" task.  As it is unclear how to implement the
    "Unrestricted" variant correctly, it is left unsupported for now.

      .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.

    Read more in the :ref:`User Guide <labeled_faces_in_the_wild_dataset>`.

    Parameters
    ----------
    subset : optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit-learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled : boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize : float, optional, default 0.5
        Ratio used to resize each face picture.

    color : boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.

    slice_ : optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing : optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    The data is returned as a Bunch object with the following attributes:

    data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
        Each row corresponds to 2 ravel'd face images of original size 62 x 47
        pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
        will change the shape of the output.

    pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on ``subset``
        Each row has 2 face images corresponding to same or different person
        from the dataset containing 5749 people. Changing the ``slice_``,
        ``resize`` or ``subset`` parameters will change the shape of the
        output.

    target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated to each pair of images. The two label values being
        different persons or the same person.

    DESCR : string
        Description of the Labeled Faces in the Wild (LFW) dataset.
    """
    lfw_home, data_folder_path = _check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.debug('Loading %s LFW pairs from %s', subset, lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    if LooseVersion(joblib.__version__) < LooseVersion('0.12'):
        # Deal with change of API in joblib
        memory = Memory(cachedir=lfw_home, compress=6, verbose=0)
    else:
        memory = Memory(location=lfw_home, compress=6, verbose=0)
    load_func = memory.cache(_fetch_lfw_pairs)

    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])

    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)

    with open(join(dirname(__file__), 'descr', 'lfw.rst')) as rst_file:
        fdescr = rst_file.read()

    # pack the results as a Bunch instance
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR=fdescr)
| |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
from datetime import datetime, timedelta
from django.contrib import messages
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.db.models import Case, When, Count, Sum, Min
from django.db.models import Q, Avg
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView, UpdateView, DeleteView, ModelFormMixin
from .forms import *
from django.utils import timezone
from api.static_methods import *
from vtn.tasks import update_event_statuses
from collections import OrderedDict
from django.conf import settings
class CustomerCreate(CreateView):
    """Create a new Customer via CustomerForm, then return to the home page."""
    form_class = CustomerForm
    success_url = reverse_lazy('vtn:home')
    template_name = 'vtn/customer_create_form.html'
class CustomerUpdate(UpdateView):
    """Edit an existing Customer's name and utility id, then return home."""
    fields = ['name', 'utility_id']
    model = Customer
    success_url = reverse_lazy('vtn:home')
class DREventDelete(DeleteView):
    """Confirmation page for hard-deleting a DREvent row."""
    template_name = "vtn/dr_event_confirm_delete.html"
    model = DREvent
class CustomerDetailView(UpdateView):
    """Customer detail/edit page; also handles the delete-customer button."""

    model = Customer
    fields = '__all__'
    template_name = "vtn/customer_detail.html"

    def get_context_data(self, **kwargs):
        # Expose the customer and its sites to the template.
        ctx = super(CustomerDetailView, self).get_context_data(**kwargs)
        customer = Customer.objects.get(pk=self.kwargs['pk'])
        ctx['customer'] = customer
        ctx['sites'] = customer.site_set.all()
        return ctx

    def post(self, request, *args, **kwargs):
        # The template posts 'delete-customer' from its delete button;
        # anything else is a normal edit submission.
        if 'delete-customer' not in request.POST:
            return super(CustomerDetailView, self).post(request, *args, **kwargs)
        Customer.objects.get(pk=self.kwargs['pk']).delete()
        return HttpResponseRedirect(reverse_lazy('vtn:home'))
class SiteDetailView(UpdateView):
    """Site detail/edit page; also manages DR-program membership and deletion."""

    template_name = "vtn/site_detail.html"
    model = Site
    form_class = SiteForm

    def get_context_data(self, **kwargs):
        context = super(SiteDetailView, self).get_context_data(**kwargs)
        context['customer'] = self.object.customer
        context['ven_id'] = self.object.ven_id
        context['ven_name'] = self.object.ven_name
        return context

    def get_form(self, form_class=None):
        # Offer every DR program, alphabetically, in the multi-select.
        form = super(SiteDetailView, self).get_form(form_class)
        form.fields["dr_programs"].queryset = DRProgram.objects.all().order_by('name')
        return form

    # Prepopulate dr programs with already chosen dr programs
    def get_initial(self):
        initial = super(SiteDetailView, self).get_initial()
        initial['dr_programs'] = self.object.drprogram_set.all()
        return initial

    def post(self, request, *args, **kwargs):
        if 'delete_site' in request.POST:
            # Delete button: remove the site and return to its customer page.
            Site.objects.get(pk=request.POST['pk']).delete()
            return HttpResponseRedirect(reverse_lazy('vtn:customer_detail',
                                                     kwargs={'pk': request.POST['customer']}))
        else:
            # If the DR Programs were altered
            if 'dr_programs' in request.POST:
                dr_programs = DRProgram.objects.all()
                for program in request.POST.getlist('dr_programs'):
                    dr_program = dr_programs.get(pk=program)
                    # Add this site to newly selected programs; note that
                    # de-selected programs are NOT removed here.
                    if dr_program not in self.get_object().drprogram_set.all():
                        dr_program.sites.add(self.get_object())
                        dr_program.save()
            return super(SiteDetailView, self).post(request, *args, **kwargs)
class CreateSiteView(CreateView):
    """Create a Site for a given customer (customer pk comes from the URL)."""

    template_name = "vtn/create_site.html"
    model = Site
    form_class = SiteForm

    def get_initial(self):
        # Pre-select the customer the site is being created for.
        customer = Customer.objects.get(pk=self.kwargs['pk'])
        return {
            'customer': customer
        }

    def get_context_data(self, **kwargs):
        context = super(CreateSiteView, self).get_context_data(**kwargs)
        context['customer'] = Customer.objects.get(pk=self.kwargs['pk'])
        return context

    def get_success_url(self):
        return reverse_lazy('vtn:customer_detail',
                            kwargs={'pk': self.kwargs['pk']})

    def post(self, request, *args, **kwargs):
        form = SiteForm(request.POST)
        self.object = None
        if form.is_valid():
            self.object = form.save(commit=False)
            # Auto-assign a VEN id when the form left it blank.
            if not self.object.ven_id:
                self.object.ven_id = get_new_ven_ID()
            self.object.save()
            # Register the new site with each selected DR program.
            if 'dr_programs' in request.POST:
                dr_programs = DRProgram.objects.all()
                for program in request.POST.getlist('dr_programs'):
                    dr_program = dr_programs.get(pk=program)
                    dr_program.sites.add(self.object)
                    dr_program.save()
            return HttpResponseRedirect(reverse_lazy('vtn:customer_detail',
                                                     kwargs={'pk': request.POST['customer']}))
        else:
            return super(CreateSiteView, self).form_invalid(form)
def get_new_ven_ID():
    """Return the next free VEN id as a string.

    VEN ids are stored as string-encoded integers on Site; the next id is
    one more than the current maximum, or '0' when no sites exist yet.
    """
    existing_ids = [int(s.ven_id) for s in Site.objects.all()]
    # max() is O(n); the previous sort-then-take-last was O(n log n).
    return str(max(existing_ids) + 1) if existing_ids else '0'
def delete_dr_event(request, pk):
    """
    Soft-delete a DR event by superseding it with a cancelled/deleted copy.

    NOTE(review): this body is byte-for-byte identical to cancel_dr_event;
    consider sharing one implementation.

    :param pk: the pk of the event that is being deleted
    :param request: request object
    :return: redirects user to homepage
    """
    # Saving a model instance with pk=None makes Django INSERT a new row,
    # so this clones the event instead of renaming it.
    old_dr_event = DREvent.objects.get(pk=pk)
    new_dr_event = old_dr_event
    new_dr_event.pk = None
    new_dr_event.deleted = True
    new_dr_event.modification_number = old_dr_event.modification_number + 1
    new_dr_event.status = 'cancelled'
    new_dr_event.save()
    # Re-fetch the original row (the local names above now refer to the
    # clone) and mark it superseded.
    old_dr_event = DREvent.objects.get(pk=pk)
    old_dr_event.superseded = True
    old_dr_event.save()
    # Re-point the original event's site events at the clone and cancel them.
    site_events = SiteEvent.objects.filter(dr_event=old_dr_event)
    for site_event in site_events:
        site_event.dr_event = new_dr_event
        site_event.ven_status = 'not_told'
        site_event.status = 'cancelled'
        site_event.save()
    return HttpResponseRedirect(reverse_lazy('vtn:home'))
def cancel_dr_event(request, pk):
    """
    Cancel a DR event by superseding it with a cancelled copy.

    NOTE(review): this body is byte-for-byte identical to delete_dr_event
    (including setting deleted=True); confirm whether a cancel should
    really mark the clone deleted.

    :param pk: the pk of the event that is being cancelled
    :param request: request object
    :return: redirects user to homepage
    """
    # Saving with pk=None inserts a clone of the event row.
    old_dr_event = DREvent.objects.get(pk=pk)
    new_dr_event = old_dr_event
    new_dr_event.pk = None
    new_dr_event.deleted = True
    new_dr_event.modification_number = old_dr_event.modification_number + 1
    new_dr_event.status = 'cancelled'
    new_dr_event.save()
    # Re-fetch the original row and mark it superseded.
    old_dr_event = DREvent.objects.get(pk=pk)
    old_dr_event.superseded = True
    old_dr_event.save()
    # Re-point the original's site events at the clone and cancel them.
    site_events = SiteEvent.objects.filter(dr_event=old_dr_event)
    for site_event in site_events:
        site_event.dr_event = new_dr_event
        site_event.ven_status = 'not_told'
        site_event.status = 'cancelled'
        site_event.save()
    return HttpResponseRedirect(reverse_lazy('vtn:home'))
def change_password(request):
    """Render and process the password-change form for the current user."""
    if request.method != 'POST':
        # Initial GET: show an unbound form.
        form = PasswordChangeForm(request.user)
        return render(request, 'vtn/change_password.html', {
            'form': form
        })
    form = PasswordChangeForm(request.user, request.POST)
    if form.is_valid():
        user = form.save()
        # Keep the current session authenticated after the password change.
        update_session_auth_hash(request, user)  # Important!
        messages.success(request, 'Your password was successfully updated!')
        return redirect(reverse_lazy('vtn:home'))
    messages.error(request, 'Please correct the error below.')
    return render(request, 'vtn/change_password.html', {
        'form': form
    })
def dr_event_export(request, pk):
    """
    This function does the actual exporting of a given
    DR Event's data, as a CSV attachment response.
    """
    event = DREvent.objects.get(pk=pk)
    sites = Site.objects.filter(siteevent__dr_event=event)
    # Newest-first telemetry for every site enrolled in the event.
    t_data = Telemetry.objects.filter(site__in=sites) \
        .filter(reported_on__range=(event.start, event.end)) \
        .order_by('-reported_on')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="dr-events.csv"'
    writer = csv.writer(response)
    writer.writerow(['DR Program', 'Site', 'Time', 'Baseline Power (kw)', 'Measured Power (kw)'])
    for datum in t_data:
        # NOTE(review): rows are filtered/sorted on reported_on but the
        # 'Time' column prints created_on -- confirm this is intended.
        writer.writerow([event.dr_program, datum.site, datum.created_on.strftime("%Y-%m-%d %I:%M:%S %p"),
                         datum.baseline_power_kw, datum.measured_power_kw])
    return response
def report(request):
    """Render the 'Report' page with its initial, unfiltered DR-event table."""
    form = DREventFormFilter()
    # Live (non-superseded, non-deleted) events, newest first, with the
    # enrolled-site count and program pre-fetched for the template.
    events = DREvent.objects.all() \
        .filter(superseded=False) \
        .filter(deleted=False) \
        .order_by('-start') \
        .annotate(numSites=Count('sites')) \
        .select_related('dr_program')
    return render(request, 'vtn/dr_event_export_filter.html', {'data': events, 'form': form})
def get_more_tables(request):
    """Function that is called to update the DREvent list on the export page.

    Args:
        request : request from an Ajax "GET" call, with a dictionary
            of what filters are going to be used

    Returns:
        HTML : An updated table if a filter has been applied, or the filter forms
            and a 'fresh' table if the "Clear Filters" button has been pressed.
    """
    form = DREventFormFilter()
    # Base queryset: live (non-superseded, non-deleted) events, excluding
    # cancelled ones under both spellings used in the system.
    final_events = DREvent.objects.all().order_by('-start') \
        .annotate(numSites=Count('sites')) \
        .select_related('dr_program') \
        .filter(~Q(status='cancelled')).filter(~Q(status='CANCELED')) \
        .filter(superseded=False) \
        .filter(deleted=False)
    # If the clear filters button was pressed, return all the DR Events
    if 'clearFilters' in request.GET:
        return render(request, 'vtn/clean_dr_event_filter.html', {'data': final_events, 'form': form})
    else:
        # Get submitted filters
        dr_program_num = request.GET.get('drprogram', '')
        date_range = request.GET.get('daterange', '')
        # If there is a DR Program filter
        if dr_program_num != '':
            dr_programs = DRProgram.objects.all().order_by('name')
            programs = dr_programs.get(pk=dr_program_num)
            final_events = final_events.filter(dr_program=programs)
        # If there is a date-range filter
        if date_range != '':
            # Expected input shape: 'MM/DD/YYYY - MM/DD/YYYY'.
            date_list = [datetime.strptime(x.strip(), '%m/%d/%Y') for x in date_range.split('-')]
            start, end = date_list[0], date_list[1]
            # Make the end bound inclusive of the whole final day.
            end = end + timedelta(hours=23, minutes=59, seconds=59)
            final_events = final_events.filter(Q(start__gte=start, start__lte=end))
        return render(request, 'vtn/get_more_tables.html', {'data': final_events})
class DREventAdd(CreateView):
    """Create a new DR event plus one SiteEvent per selected site."""

    template_name = 'vtn/dr_event_form.html'
    form_class = DREventForm
    model = DREvent
    success_url = reverse_lazy("vtn:home")

    def get_form(self, form_class=form_class):
        form = super(DREventAdd, self).get_form(form_class)
        # Sites grouped by customer name in the multi-select.
        form.fields["sites"].queryset = Site.objects.all() \
            .select_related('customer') \
            .order_by('customer__name')
        # Default timeline: notify in 1h, event runs from +2h to +3h.
        form.fields['scheduled_notification_time'].initial = (timezone.now() + timedelta(hours=1))
        form.fields['start'].initial = (timezone.now() + timedelta(hours=2))
        form.fields['end'].initial = (timezone.now() + timedelta(hours=3))
        return form

    def form_valid(self, form):
        self.object = form.save(commit=False)
        # event_id is a monotonically increasing business id, kept separate
        # from the database pk.
        dr_events = DREvent.objects.all().order_by('-event_id')
        try:
            latest_event_id = dr_events[0].event_id
        except IndexError:
            # No events exist yet.
            latest_event_id = 0
        self.object.event_id = latest_event_id + 1
        self.object.save()
        # Create the site events
        for site in form.cleaned_data['sites']:
            s = SiteEvent()
            s.dr_event = self.object
            s.site = site
            s.status = 'far'
            s.last_status_time = timezone.now()
            s.opt_in = 'none'
            s.save()
        # Deliberately skip ModelFormMixin.form_valid (it would save the
        # form a second time) and jump to its parent's redirect-only logic.
        return super(ModelFormMixin, self).form_valid(form)
# The difference between this view and DREventAdd is that
# this view is called when editing a DR Event.
class DREventCreate(CreateView):
    """'Edit' a DR event by creating a superseding copy (a CreateView on
    purpose: the old row is kept, flagged superseded, and its site events
    are re-pointed at the new row so history is preserved)."""

    template_name = "vtn/dr_event_update_form.html"
    model = DREvent
    form_class = DREventUpdateForm

    def get_form(self, form_class=form_class):
        form = super(DREventCreate, self).get_form(form_class)
        dr_event = DREvent.objects.get(pk=self.kwargs['pk'])
        dr_program = dr_event.dr_program
        # Only sites enrolled in this event's program are selectable.
        queryset_sites = dr_program.sites.all().select_related('customer').order_by('customer')
        form.fields["sites"].queryset = queryset_sites
        dr_event = DREvent.objects.get(pk=self.kwargs['pk'])
        # Seed the form from the event being edited.
        form.fields['dr_program'].initial = dr_event.dr_program
        form.fields['start'].initial = dr_event.start
        form.fields['end'].initial = dr_event.end
        form.fields['scheduled_notification_time'].initial = dr_event.scheduled_notification_time
        form.fields['modification_number'].initial = dr_event.modification_number + 1
        form.fields['status'].initial = dr_event.status
        # Pre-check the sites that still have a live (non-cancelled) event.
        site_events = SiteEvent.objects.filter(dr_event=dr_event).filter(~Q(status='cancelled'))
        sites = [s.site for s in site_events]
        form.fields['sites'].initial = sites
        return form

    def get_context_data(self, **kwargs):
        context = super(DREventCreate, self).get_context_data(**kwargs)
        context['pk'] = self.kwargs['pk']
        return context

    def form_valid(self, form):
        # Get old DR Event and mark it as superseded. Also get old site events
        old_dr_event = DREvent.objects.get(pk=self.kwargs['pk'])  # Add exception handling here (put in try block)
        # Get the newly created DR Event
        self.object = form.save(commit=False)
        # Set correct previous version, and modification number
        self.object.modification_number = old_dr_event.modification_number + 1
        self.object.event_id = old_dr_event.event_id
        self.object.save()
        old_site_events = SiteEvent.objects.filter(dr_event=old_dr_event)
        old_dr_event.superseded = True
        old_dr_event.save()
        old_dr_event = DREvent.objects.get(pk=self.kwargs['pk'])  # Add exception handling here (put in try block)
        # Get list of sites that were enrolled in the old DR Event
        old_sites = list(old_site_events.values_list('site', flat=True))
        # Need to compare sites
        # Get list of sites that were just chosen in the form
        form_sites = [site.pk for site in form.cleaned_data['sites']]
        # Sites that need new site events
        new_sites_to_be_created = list(set(form_sites) - set(old_sites))
        # Sites that remained in the form's chosen sites
        existing_sites_to_be_updated = list(set(form_sites) & set(old_sites))
        # Sites/site-events that we don't need
        sites_to_be_superseded = list(set(old_sites) - set(form_sites))
        site_events_to_be_removed = SiteEvent.objects.filter(site__in=sites_to_be_superseded) \
            .filter(dr_event=old_dr_event)
        # Get remaining (existing) site events to update below
        remaining_site_events = SiteEvent.objects.filter(dr_event=old_dr_event) \
            .filter(site__pk__in=existing_sites_to_be_updated)
        for site_pk in new_sites_to_be_created:
            site = Site.objects.get(pk=site_pk)
            s = SiteEvent()
            s.dr_event = self.object
            s.site = site
            s.status = 'scheduled'
            s.last_status_time = timezone.now()
            # s.modification_number = 0
            s.opt_in = 'none'
            s.save()
        # For sites removed from the event, mark them 'cancelled' and point
        # them to new DR Event
        # 1. 'Delete' these site-events by marking them cancelled
        for site_event_to_be_removed in site_events_to_be_removed:
            site_event_to_be_removed.status = 'cancelled'
            site_event_to_be_removed.dr_event = self.object
            site_event_to_be_removed.ven_status = 'not_told'
            site_event_to_be_removed.save()
        # With remaining site events, re-point them to new DR Event
        for site_event in remaining_site_events:
            site_event.dr_event = self.object
            site_event.status = 'scheduled'
            site_event.ven_status = 'not_told'
            site_event.save()
        # Skip ModelFormMixin.form_valid to avoid a second form.save().
        return super(ModelFormMixin, self).form_valid(form)
@login_required
def overview(request):
    """Home page: customer fleet summary plus the upcoming DR-event table.

    A non-GET request means the DR-program edit form was submitted, in
    which case the user is redirected to that program's edit view.
    """
    if request.method != "GET":
        # The form was submitted to edit a DR Program
        return HttpResponseRedirect(reverse_lazy('vtn:edit_program', kwargs={'pk': request.POST['dr_program']}))
    # Refresh event statuses before rendering anything.
    update_event_statuses()
    customers = Customer.objects.annotate(
        sites=Count('site'),
        online=Count(Case(When(site__online=True, then=1))),
        offline=Count(Case(When(site__online=False, then=1))),
    ).order_by('name')
    # DR Event Table
    upcoming_events = DREvent.objects.filter(end__gt=timezone.now()) \
        .filter(superseded=False) \
        .annotate(numSites=Count('sites')) \
        .select_related('dr_program') \
        .order_by('start')
    context = {
        'customers': customers,
        'dr_event_data': upcoming_events,
        'form': DRProgramEditForm(),
    }
    return render(request, 'vtn/home.html', context)
# For the filter event
def get_dr_event_form(request):
    """Return the DR-event filter form for the DR program chosen via Ajax.

    Fixes a latent UnboundLocalError: the original fell through to
    render() with ``form`` and ``sites`` never assigned when the
    'dr_program' GET parameter was missing or empty.
    """
    dr_program_pk = request.GET.get('dr_program', '')
    if dr_program_pk == '':
        # No program selected -- nothing sensible to render.
        return HttpResponse(status=400)
    dr_program = DRProgram.objects.get(pk=dr_program_pk)
    form = DREventFilterForm()
    # Every site in the program, pre-selected by default.
    sites = dr_program.sites.all().order_by('site_name')
    form.fields["sites"].queryset = sites
    form.fields["sites"].initial = sites
    return render(request, 'vtn/dr_event_filter_form.html', {'form': form, 'dr_program': dr_program})
def get_dr_event_details(request, pk):
    # This function is called when a customer or site is selected
    # on the DR Event detail screen. It loads the graph data for
    # the specified customer or site.
    customer = request.GET.get('customer', '')
    site_pk = request.GET.get('site', '')
    context = {}
    event = DREvent.objects.get(pk=pk)
    # Work out which sites to graph: all of the event's sites ('empty'),
    # one customer's sites, or a single site.
    if customer != '':
        if customer == 'empty':
            sites = Site.objects.filter(siteevent__dr_event=event)
        else:
            customers = Customer.objects.all().order_by('pk')
            customer = customers.get(pk=customer)
            sites = Site.objects.filter(siteevent__dr_event=event).filter(customer=customer)
    elif site_pk == 'empty':
        sites = Site.objects.filter(siteevent__dr_event=event)
    else:
        sites = Site.objects.filter(pk=site_pk)
    start = event.start
    end = event.end
    # Bucket telemetry into GRAPH_TIMECHUNK_SECONDS-wide epoch slices via
    # raw SQL in .extra() (presumably a PostgreSQL backend -- TODO confirm).
    date_slice = "trunc(extract(epoch from created_on) / '{}' ) * {}".format(str(settings.GRAPH_TIMECHUNK_SECONDS),
                                                                             str(settings.GRAPH_TIMECHUNK_SECONDS))
    t_data = Telemetry.objects.filter(site__in=sites) \
        .filter(created_on__range=(start, end)) \
        .extra(select={'date_slice': date_slice}) \
        .values('date_slice', 'site') \
        .annotate(avg_baseline_power_kw=Avg('baseline_power_kw'),
                  avg_measured_power_kw=Avg('measured_power_kw'),
                  time=Min('created_on'))
    if t_data.count() == 0:
        # Template shows a 'no data' message (string flag, not a bool).
        context['no_data_for_sites'] = 'True'
        return render(request, 'vtn/dr_event_customer_detail.html', context)
    else:
        co = t_data.order_by('-created_on')
        context['t_data'] = t_data
        # Zoom the default graph focus in by a quarter of the data range
        # on each side.
        last = co.first()['time']
        first = co.last()['time']
        difference = (last - first).seconds
        quarter = difference // 4
        last = last - timedelta(seconds=quarter)
        first = first + timedelta(seconds=quarter)
        context['start_focus'] = first
        context['end_focus'] = last
        # Sum per-site averages into one baseline and one measured series,
        # keyed and ordered by time slice.
        sum_baseline = {}
        sum_measured = {}
        for datum in t_data:
            if datum['date_slice'] in sum_baseline:
                sum_baseline[datum['date_slice']] += datum['avg_baseline_power_kw']
            else:
                sum_baseline[datum['date_slice']] = datum['avg_baseline_power_kw']
            if datum['date_slice'] in sum_measured:
                sum_measured[datum['date_slice']] += datum['avg_measured_power_kw']
            else:
                sum_measured[datum['date_slice']] = datum['avg_measured_power_kw']
        context['sum_baseline'] = OrderedDict(sorted(sum_baseline.items(), key=lambda t: t[0]))
        context['sum_measured'] = OrderedDict(sorted(sum_measured.items(), key=lambda t: t[0]))
        context['no_data_for_sites'] = 'False'
        return render(request, 'vtn/dr_event_customer_detail.html', context)
def dr_event_dispatch(request, pk):
    """Route a DR-event click to the right page.

    Events that have already started go to the read-only detail view;
    future events go to the update view.
    """
    event = DREvent.objects.get(pk=pk)
    started = event.start < timezone.now()
    view_name = 'vtn:dr_event_detail' if started else 'vtn:dr_event_update'
    return HttpResponseRedirect(reverse_lazy(view_name, kwargs={'pk': pk}))
class DREventDetail(TemplateView):
    """Read-only detail page for a DR event, including aggregated graph data."""

    template_name = "vtn/dr_event_detail.html"

    def get_context_data(self, **kwargs):
        context = super(DREventDetail, self).get_context_data(**kwargs)
        event = DREvent.objects.get(pk=self.kwargs['pk'])
        sites = DREvent.objects.get(pk=self.kwargs['pk']).sites.all()
        customer_form = DREventCustomerDetailForm()
        site_form = DREventSiteDetailForm()
        # Fill out context fields
        customer_form.fields['customer'].queryset = Customer.objects.filter(site__in=sites).distinct()
        site_form.fields['site'].queryset = Site.objects.filter(siteevent__dr_event=event)
        context['event'] = event
        context['customerForm'] = customer_form
        context['siteForm'] = site_form
        context['status'] = get_status(event)
        context['pk'] = self.kwargs['pk']
        context['start'] = event.start
        context['end'] = event.end
        # Get site events for "Site Detail" tab
        # NOTE(review): this assignment is immediately overwritten below.
        context['site_events'] = SiteEvent.objects.filter(dr_event=event)
        site_events = SiteEvent.objects.filter(dr_event=event)
        for site_event in site_events:
            # Attach a display-only attribute for the template.
            site_event.last_stat = get_most_recent_stat(event, site_event.site)
        context['site_events'] = site_events
        # Only get those sites that have a corresponding Site Event
        sites = Site.objects.filter(siteevent__dr_event=event)
        # If there is no telemetry, tell template there is none so 'No data' is displayed
        if Telemetry.objects.filter(site__in=sites).filter(created_on__range=(event.start, event.end)).count() == 0:
            context['no_data'] = True
        # If there is telemetry...
        else:
            start = event.start
            end = event.end
            # Bucket telemetry into GRAPH_TIMECHUNK_SECONDS-wide epoch
            # slices; mirrors the logic in get_dr_event_details().
            date_slice = "trunc(extract(epoch from created_on) / '{}' ) * {}".format(
                str(settings.GRAPH_TIMECHUNK_SECONDS),
                str(settings.GRAPH_TIMECHUNK_SECONDS))
            t_data = Telemetry.objects.filter(site__in=sites) \
                .filter(created_on__range=(start, end)) \
                .extra(select={'date_slice': date_slice}) \
                .values('date_slice', 'site') \
                .annotate(avg_baseline_power_kw=Avg('baseline_power_kw'),
                          avg_measured_power_kw=Avg('measured_power_kw'),
                          time=Min('created_on'))
            co = t_data.order_by('-created_on')
            context['t_data'] = t_data
            # Zoom the default graph focus in by a quarter on each side.
            last = co.first()['time']
            first = co.last()['time']
            difference = (last - first).seconds
            quarter = difference // 4
            last = last - timedelta(seconds=quarter)
            first = first + timedelta(seconds=quarter)
            context['start_focus'] = first
            context['end_focus'] = last
            # Aggregate per-site averages into combined series per slice.
            sum_baseline = {}
            sum_measured = {}
            for datum in t_data:
                if datum['date_slice'] in sum_baseline:
                    sum_baseline[datum['date_slice']] += datum['avg_baseline_power_kw']
                else:
                    sum_baseline[datum['date_slice']] = datum['avg_baseline_power_kw']
                if datum['date_slice'] in sum_measured:
                    sum_measured[datum['date_slice']] += datum['avg_measured_power_kw']
                else:
                    sum_measured[datum['date_slice']] = datum['avg_measured_power_kw']
            context['sum_baseline'] = OrderedDict(sorted(sum_baseline.items(), key=lambda t: t[0]))
            context['sum_measured'] = OrderedDict(sorted(sum_measured.items(), key=lambda t: t[0]))
        return context
def get_most_recent_stat(dr_event, site):
    """
    :param site: The site to get the most recent measured power stat for.
    :param dr_event: Used to get start and end times for telemetry.
    :return: Ideally, returns the difference between the site's baseline
        power and its actual power. If there is no baseline, returns the
        measured power; if there is no telemetry at all (or any other
        error occurs), returns 'N.A.'.
    """
    try:
        t_data = Telemetry.objects.filter(site=site) \
            .filter(reported_on__range=(dr_event.start, dr_event.end)) \
            .order_by('-reported_on')[0]
        if t_data.baseline_power_kw is not None:
            return t_data.baseline_power_kw - t_data.measured_power_kw
        else:
            return t_data.measured_power_kw
    except Exception:
        # Best-effort display value for the template. The original caught
        # (IndexError, Exception), which is redundant: Exception already
        # subsumes IndexError.
        return 'N.A.'
def get_status(dr_event):
    """Return a display status for *dr_event* based on the current time.

    "Active" once the event has started; otherwise "Notification" once the
    scheduled notification time has passed.

    NOTE(review): implicitly returns None when neither time has passed --
    confirm whether a trailing branch (e.g. a scheduled state) is missing.
    """
    if dr_event.start < timezone.now():
        return "Active"
    elif dr_event.scheduled_notification_time < timezone.now():
        return "Notification"
| |
from __future__ import print_function
from copy import copy, deepcopy
import datetime
import inspect
import sys
import traceback
from django.core.management import call_command
from django.core.management.commands import loaddata
from django.db import models
from django import VERSION as DJANGO_VERSION
import south.db
from south import exceptions
from south.db import DEFAULT_DB_ALIAS
from south.models import MigrationHistory
from south.signals import ran_migration
from south.utils.py3 import StringIO
class Migrator(object):
    """Shared machinery for running migrations in one direction.

    Direction-specific subclasses supply title/status/orm/direction/record;
    this base class handles printing, transactions, dry-run pre-flight and
    signal dispatch.
    """

    def __init__(self, verbosity=0, interactive=False):
        # verbosity: 0 = silent; any truthy value enables progress output.
        self.verbosity = int(verbosity)
        self.interactive = bool(interactive)

    @staticmethod
    def title(target):
        # Heading printed before a run; supplied by subclasses.
        raise NotImplementedError()

    def print_title(self, target):
        if self.verbosity:
            print(self.title(target))

    @staticmethod
    def status(target):
        # Per-migration progress line; supplied by subclasses.
        raise NotImplementedError()

    def print_status(self, migration):
        status = self.status(migration)
        if self.verbosity and status:
            print(status)

    @staticmethod
    def orm(migration):
        # The frozen ORM the migration should run against; subclass-specific.
        raise NotImplementedError()

    def backwards(self, migration):
        # A backwards run executes against the *previous* migration's ORM.
        return self._wrap_direction(migration.backwards(), migration.prev_orm())

    def direction(self, migration):
        # Return a zero-argument callable that applies the migration.
        raise NotImplementedError()

    @staticmethod
    def _wrap_direction(direction, orm):
        args = inspect.getargspec(direction)
        if len(args[0]) == 1:
            # Old migration, no ORM should be passed in
            return direction
        return (lambda: direction(orm))

    @staticmethod
    def record(migration, database):
        # Persist/remove the MigrationHistory row; subclass-specific.
        raise NotImplementedError()

    def run_migration_error(self, migration, extra_info=''):
        # Message shown when a migration dies on a database without DDL
        # transactions, leaving the schema in an interim state.
        return (
            ' ! Error found during real run of migration! Aborting.\n'
            '\n'
            ' ! Since you have a database that does not support running\n'
            ' ! schema-altering statements in transactions, we have had \n'
            ' ! to leave it in an interim state between migrations.\n'
            '%s\n'
            ' ! The South developers regret this has happened, and would\n'
            ' ! like to gently persuade you to consider a slightly\n'
            ' ! easier-to-deal-with DBMS (one that supports DDL transactions)\n'
            ' ! NOTE: The error which caused the migration to fail is further up.'
        ) % extra_info

    def run_migration(self, migration, database):
        """Execute one migration inside a transaction and record it."""
        migration_function = self.direction(migration)
        south.db.db.start_transaction()
        try:
            migration_function()
            south.db.db.execute_deferred_sql()
            if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
                # record us as having done this in the same transaction,
                # since we're not in a dry run
                self.record(migration, database)
        except:
            # Bare except on purpose: roll back, report, then re-raise
            # whatever went wrong (including KeyboardInterrupt).
            south.db.db.rollback_transaction()
            if not south.db.db.has_ddl_transactions:
                print(self.run_migration_error(migration))
            print("Error in migration: %s" % migration)
            raise
        else:
            try:
                south.db.db.commit_transaction()
            except:
                print("Error during commit in migration: %s" % migration)
                raise

    def run(self, migration, database):
        # Get the correct ORM.
        south.db.db.current_orm = self.orm(migration)
        # If we're not already in a dry run, and the database doesn't support
        # running DDL inside a transaction, *cough*MySQL*cough* then do a dry
        # run first.
        if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator):
            if not south.db.db.has_ddl_transactions:
                dry_run = DryRunMigrator(migrator=self, ignore_fail=False)
                dry_run.run_migration(migration, database)
        return self.run_migration(migration, database)

    def send_ran_migration(self, migration, database):
        # Notify listeners (e.g. fixture loading) that a migration ran.
        ran_migration.send(None,
                           app=migration.app_label(),
                           migration=migration,
                           method=self.__class__.__name__.lower(),
                           verbosity=self.verbosity,
                           interactive=self.interactive,
                           db=database)

    def migrate(self, migration, database):
        """
        Runs the specified migration forwards/backwards, in order.
        """
        app = migration.migrations._migrations  # NOTE(review): unused local
        migration_name = migration.name()  # NOTE(review): unused local
        self.print_status(migration)
        result = self.run(migration, database)
        self.send_ran_migration(migration, database)
        return result

    def migrate_many(self, target, migrations, database):
        # Plan and run a sequence of migrations; subclass-specific.
        raise NotImplementedError()
class MigratorWrapper(object):
    """Decorator-style wrapper around a Migrator.

    A copy of the wrapped migrator receives this wrapper's public methods,
    so overridden behavior wins while every other attribute is delegated
    through __getattr__.
    """

    def __init__(self, migrator, *args, **kwargs):
        self._migrator = copy(migrator)
        overrides = dict(
            (name, getattr(self, name))
            for name in self.__class__.__dict__
            if not name.startswith('__')
        )
        self._migrator.__dict__.update(overrides)
        # Let the copy find its way back to this wrapper.
        self._migrator.__dict__['_wrapper'] = self

    def __getattr__(self, name):
        return getattr(self._migrator, name)
class DryRunMigrator(MigratorWrapper):
    """Wrapper that runs a migration inside a rolled-back transaction.

    Used both for explicit dry runs and as a pre-flight check before real
    runs on databases without DDL transactions.
    """

    def __init__(self, ignore_fail=True, *args, **kwargs):
        super(DryRunMigrator, self).__init__(*args, **kwargs)
        # When True, a failed dry run returns False instead of raising.
        self._ignore_fail = ignore_fail

    def _run_migration(self, migration):
        if migration.no_dry_run():
            if self.verbosity:
                print(" - Migration '%s' is marked for no-dry-run." % migration)
            return
        # Flip every configured database into dry-run mode.
        # NOTE(review): dict.iteritems() is Python 2 only.
        for name, db in south.db.dbs.iteritems():
            south.db.dbs[name].dry_run = True
        # preserve the constraint cache as it can be mutated by the dry run
        constraint_cache = deepcopy(south.db.db._constraint_cache)
        if self._ignore_fail:
            # Silence SQL debug output for the throwaway run, remembering
            # the old setting and the pending creates to restore later.
            south.db.db.debug, old_debug = False, south.db.db.debug
            pending_creates = south.db.db.get_pending_creates()
        south.db.db.start_transaction()
        migration_function = self.direction(migration)
        try:
            try:
                migration_function()
                south.db.db.execute_deferred_sql()
            except:
                # Wrap any failure so run_migration can distinguish
                # dry-run failures from real ones.
                raise exceptions.FailedDryRun(migration, sys.exc_info())
        finally:
            south.db.db.rollback_transactions_dry_run()
            if self._ignore_fail:
                south.db.db.debug = old_debug
                south.db.db.clear_run_data(pending_creates)
            for name, db in south.db.dbs.iteritems():
                south.db.dbs[name].dry_run = False
        # restore the preserved constraint cache from before dry run was
        # executed
        south.db.db._constraint_cache = constraint_cache

    def run_migration(self, migration, database):
        try:
            self._run_migration(migration)
        except exceptions.FailedDryRun:
            if self._ignore_fail:
                return False
            raise

    def send_ran_migration(self, *args, **kwargs):
        # Dry runs never fire the ran_migration signal.
        pass
class FakeMigrator(MigratorWrapper):
    """Wrapper that records migrations as applied without executing them."""

    def run(self, migration, database):
        # Don't actually run, just record as if ran
        self.record(migration, database)
        if self.verbosity:
            print(' (faked)')

    def send_ran_migration(self, *args, **kwargs):
        # Faked runs must not fire the ran_migration signal.
        pass
class LoadInitialDataMigrator(MigratorWrapper):
    """
    Wrapper that, after a successful migrate_many to the final migration of
    an app, loads that app's initial_data fixture.
    """

    def load_initial_data(self, target, db='default'):
        """Load initial_data for target's app, but only at the last migration."""
        if target is None or target != target.migrations[-1]:
            return
        # Load initial data, if we ended up at target
        if self.verbosity:
            print(" - Loading initial data for %s." % target.app_label())
        # Fixture-loading internals changed in Django 1.6; pick the right path.
        if DJANGO_VERSION < (1, 6):
            self.pre_1_6(target, db)
        else:
            self.post_1_6(target, db)

    def pre_1_6(self, target, db):
        """loaddata for Django < 1.6, limited to the target app only."""
        # Override Django's get_apps call temporarily to only load from the
        # current app
        old_get_apps = models.get_apps
        new_get_apps = lambda: [models.get_app(target.app_label())]
        models.get_apps = new_get_apps
        loaddata.get_apps = new_get_apps
        try:
            call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
        finally:
            # Always restore the real get_apps, even if loaddata blew up.
            models.get_apps = old_get_apps
            loaddata.get_apps = old_get_apps

    def post_1_6(self, target, db):
        """loaddata for Django >= 1.6, via a temporary single-app AppCache."""
        import django.db.models.loading
        ## build a new 'AppCache' object with just the app we care about.
        old_cache = django.db.models.loading.cache
        new_cache = django.db.models.loading.AppCache()
        new_cache.get_apps = lambda: [new_cache.get_app(target.app_label())]
        ## monkeypatch
        django.db.models.loading.cache = new_cache
        try:
            call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db)
        finally:
            ## unmonkeypatch
            django.db.models.loading.cache = old_cache

    def migrate_many(self, target, migrations, database):
        """Delegate to the wrapped migrator, then load initial data on success."""
        migrator = self._migrator
        # Call the wrapped class's unbound migrate_many on the migrator copy.
        result = migrator.__class__.migrate_many(migrator, target, migrations, database)
        if result:
            self.load_initial_data(target, db=database)
        return True
class Forwards(Migrator):
    """
    Runs the specified migration forwards, in order.
    """

    torun = 'forwards'

    @staticmethod
    def title(target):
        """Status-line header; forwards always needs a concrete target."""
        if target is not None:
            return " - Migrating forwards to %s." % target.name()
        else:
            assert False, "You cannot migrate forwards to zero."

    @staticmethod
    def status(migration):
        return ' > %s' % migration

    @staticmethod
    def orm(migration):
        # Forwards runs against the migration's own (post-apply) ORM frozen state.
        return migration.orm()

    def forwards(self, migration):
        return self._wrap_direction(migration.forwards(), migration.orm())

    direction = forwards

    @staticmethod
    def record(migration, database):
        # Record us as having done this
        record = MigrationHistory.for_migration(migration, database)
        try:
            # Prefer a timezone-aware timestamp where Django provides one.
            from django.utils.timezone import now
            record.applied = now()
        except ImportError:
            record.applied = datetime.datetime.utcnow()
        if database != DEFAULT_DB_ALIAS:
            record.save(using=database)
        else:
            # Django 1.1 and below always go down this branch.
            record.save()

    def format_backwards(self, migration):
        """Dry-run the reverse migration and return its captured SQL as text."""
        if migration.no_dry_run():
            return " (migration cannot be dry-run; cannot discover commands)"
        # Temporarily force debug + dry_run so SQL is printed, and capture
        # stdout to collect it.
        old_debug, old_dry_run = south.db.db.debug, south.db.db.dry_run
        south.db.db.debug = south.db.db.dry_run = True
        stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            try:
                self.backwards(migration)()
                return sys.stdout.getvalue()
            except:
                raise
        finally:
            # Restore DB flags and real stdout no matter what happened.
            south.db.db.debug, south.db.db.dry_run = old_debug, old_dry_run
            sys.stdout = stdout

    def run_migration_error(self, migration, extra_info=''):
        """Augment the base error report with the reverse-migration SQL hint."""
        extra_info = ('\n'
                      '! You *might* be able to recover with:'
                      '%s'
                      '%s' %
                      (self.format_backwards(migration), extra_info))
        return super(Forwards, self).run_migration_error(migration, extra_info)

    def migrate_many(self, target, migrations, database):
        """Apply each migration in order; stop (returning False) on nice failure."""
        try:
            for migration in migrations:
                result = self.migrate(migration, database)
                if result is False:  # The migrations errored, but nicely.
                    return False
        finally:
            # Call any pending post_syncdb signals
            south.db.db.send_pending_create_signals(verbosity=self.verbosity,
                                                    interactive=self.interactive)
        return True
class Backwards(Migrator):
    """
    Runs the specified migration backwards, in order.
    """

    torun = 'backwards'

    @staticmethod
    def title(target):
        """Status-line header; a None target means unmigrating everything."""
        if target is None:
            return " - Migrating backwards to zero state."
        return " - Migrating backwards to just after %s." % target.name()

    @staticmethod
    def status(migration):
        return ' < %s' % migration

    @staticmethod
    def orm(migration):
        # Backwards runs against the ORM state from *before* this migration.
        return migration.prev_orm()

    direction = Migrator.backwards

    @staticmethod
    def record(migration, database):
        """Delete the history row so the migration reads as not applied."""
        record = MigrationHistory.for_migration(migration, database)
        if record.id is None:
            return
        if database != DEFAULT_DB_ALIAS:
            record.delete(using=database)
        else:
            # Django 1.1 always goes down here
            record.delete()

    def migrate_many(self, target, migrations, database):
        """Unapply every migration in the given order."""
        for step in migrations:
            self.migrate(step, database)
        return True
| |
#!/usr/bin/env python
# (c) 2012-2013 Anaconda, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from subprocess import check_output, PIPE, Popen, STDOUT
from os.path import join, dirname, abspath, isdir
from os import makedirs, pathsep
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
try:
from shlex import quote
except ImportError:
from pipes import quote
import sys
import json
import re
# Output directories for generated man pages and RST wrappers; created at
# import time so later open() calls can't fail on a missing directory.
manpath = join(dirname(__file__), 'build', 'man')
if not isdir(manpath):
    makedirs(manpath)
rstpath = join(dirname(__file__), 'source', 'commands')
if not isdir(rstpath):
    makedirs(rstpath)

# Preamble written at the top of every generated .rst page; {command} is
# substituted per subcommand. The HTML body is indented beneath `.. raw::`.
RST_HEADER = """
.. _{command}_ref:
conda {command}
=======================
.. raw:: html
"""
def run_command(*args, **kwargs):
    """Run a subprocess and return its decoded stdout.

    The first positional argument is the argv list passed to Popen. With
    include_stderr=True, stderr is merged into stdout; otherwise unexpected
    stderr output is reported. Failures are reported but not raised.
    """
    include_stderr = kwargs.pop('include_stderr', False)
    if include_stderr:
        stderr_pipe = STDOUT
    else:
        stderr_pipe = PIPE
    p = Popen(*args, stdout=PIPE, stderr=stderr_pipe, **kwargs)
    out, err = p.communicate()
    if err is None:
        # communicate() yields None for stderr when it was merged into stdout.
        err = b''
    out, err = out.decode('utf-8'), err.decode('utf-8')
    # Quote the argv once for both diagnostic messages. The original used
    # ' '.join(*args) in the stderr branch, which is inconsistent with the
    # failure branch and breaks if extra positional args are passed to Popen.
    cmdline = ' '.join(map(quote, args[0]))
    if p.returncode != 0:
        print("%r failed with error code %s" % (cmdline, p.returncode), file=sys.stderr)
    elif err:
        print("%r gave stderr output: %s" % (cmdline, err))
    return out
def str_check_output(*args, **kwargs):
    """check_output wrapper that returns UTF-8 decoded text instead of bytes."""
    raw = check_output(*args, **kwargs)
    return raw.decode('utf-8')
def conda_help(cache=[]):
    """Return `conda --help` output; the mutable default memoizes one call."""
    if not cache:
        # First call: run conda once and stash the text for later callers.
        cache.append(str_check_output(['conda', '--help']))
    return cache[0]
def conda_command_help(command):
    """Return `conda <command> --help` output (command may contain spaces)."""
    argv = ['conda'] + command.split() + ['--help']
    return str_check_output(argv)
def conda_commands():
    """Parse `conda --help` output for the list of core subcommand names."""
    print("Getting list of core commands")
    help = conda_help()
    commands = []
    start = False
    for line in help.splitlines():
        # Commands start after "command" header
        if line.strip() == 'command':
            start = True
            continue
        if start:
            # The end of the commands
            if not line:
                break
            # Command names sit at a fixed 4-space indent; deeper indentation
            # is wrapped description text. Guard len() so a short non-empty
            # line can't raise IndexError on line[4] (the original crashed).
            if len(line) > 4 and line[4] != ' ':
                commands.append(line.split()[0])
    return commands
def external_commands():
    """Discover external (plugin) conda commands and their subcommands.

    Parses `conda --help` for the "other commands" section, then fetches each
    command's own --help concurrently to find subcommands like
    `conda skeleton pypi`.
    """
    print("Getting list of external commands")
    help = conda_help()
    commands = []
    start = False
    for line in help.splitlines():
        # Commands start after "command" header
        if line.strip() == 'other commands:':
            start = True
            continue
        if start:
            # The end of the commands
            if not line:
                break
            # 4-space indent marks a command name; deeper is description text.
            if line[4] != ' ':
                commands.append(line.split()[0])

    # TODO: Parallelize this
    print("Getting list of external subcommands")
    # Matches argparse's "{sub1,sub2,...}" positional-choices line.
    subcommands_re = re.compile(r'\s*\{(.*)\}\s*')
    # Check for subcommands (like conda skeleton pypi)
    command_help = {}

    def get_help(command):
        command_help[command] = conda_command_help(command)
        print("Checked for subcommand help for %s" % command)

    # One worker per command: each just blocks on a subprocess.
    with ThreadPoolExecutor(len(commands)) as executor:
        # list() is needed for force exceptions to be raised
        list(executor.map(get_help, commands))

    for command in command_help:
        help = command_help[command]
        start = False
        for line in help.splitlines():
            if line.strip() == "positional arguments:":
                start = True
                continue
            if start:
                m = subcommands_re.match(line)
                if m:
                    commands.extend(['%s %s' % (command, i) for i in
                                     m.group(1).split(',')])
                break
    return commands
def man_replacements():
    """Build machine-specific-path -> generic-placeholder substitutions.

    Applied to generated manpages so they don't leak the build machine's
    filesystem layout.
    """
    # XXX: We should use conda-api for this, but it's currently annoying to set the
    # root prefix with.
    info = json.loads(str_check_output(['conda', 'info', '--json']))

    # We need to use an ordered dict because the root prefix should be
    # replaced last, since it is typically a substring of the default prefix
    r = OrderedDict([
        (info['default_prefix'], 'default prefix'),
        (pathsep.join(info['envs_dirs']), 'envs dirs'),
        # For whatever reason help2man won't italicize these on its own
        # Note these require conda > 3.7.1
        (info['user_rc_path'], r'\fI\,user .condarc path\/\fP'),
        (info['sys_rc_path'], r'\fI\,system .condarc path\/\fP'),
        (info['root_prefix'], r'root prefix'),
    ])

    return r
def generate_man(command):
    """Generate a manpage for `conda <command>` via help2man into manpath."""
    conda_version = run_command(['conda', '--version'], include_stderr=True)

    manpage = ''
    retries = 5
    # help2man occasionally returns empty output; retry a few times.
    while not manpage and retries:
        manpage = run_command([
            'help2man',
            '--name', 'conda %s' % command,
            '--section', '1',
            '--source', 'Anaconda, Inc.',
            '--version-string', conda_version,
            '--no-info',
            'conda %s' % command,
        ])
        retries -= 1

    if not manpage:
        sys.exit("Error: Could not get help for conda %s" % command)

    # Scrub machine-specific paths from the output.
    replacements = man_replacements()
    for text in replacements:
        manpage = manpage.replace(text, replacements[text])
    with open(join(manpath, 'conda-%s.1' % command.replace(' ', '-')), 'w') as f:
        f.write(manpage)

    print("Generated manpage for conda %s" % command)
def generate_html(command):
    """Convert the previously generated manpage for `command` to bare HTML."""
    command_file = command.replace(' ', '-')

    # Use abspath so that it always has a path separator
    man = Popen(['man', abspath(join(manpath, 'conda-%s.1' % command_file))], stdout=PIPE)
    htmlpage = check_output([
        'man2html',
        '-bare',  # Don't use HTML, HEAD, or BODY tags
        # Bug fix: the flag was written as 'title' (no dash), so man2html saw
        # two stray positional arguments instead of setting the page title.
        '-title', 'conda-%s' % command_file,
        '-topm', '0',  # No top margin
        '-botm', '0',  # No bottom margin
    ],
        stdin=man.stdout)

    with open(join(manpath, 'conda-%s.html' % command_file), 'wb') as f:
        f.write(htmlpage)

    print("Generated html for conda %s" % command)
def write_rst(command, sep=None):
    """Wrap the generated HTML for `command` in an RST page under rstpath.

    `sep` optionally names a subdirectory of rstpath to write into.
    """
    command_file = command.replace(' ', '-')
    with open(join(manpath, 'conda-%s.html' % command_file), 'r') as f:
        html = f.read()

    target_dir = rstpath
    if sep:
        target_dir = join(target_dir, sep)
        if not isdir(target_dir):
            makedirs(target_dir)

    with open(join(target_dir, 'conda-%s.rst' % command_file), 'w') as f:
        f.write(RST_HEADER.format(command=command))
        # Indent every HTML line so it nests under the `.. raw:: html` block.
        for html_line in html.splitlines():
            f.write(' ')
            f.write(html_line)
            f.write('\n')

    print("Generated rst for conda %s" % command)
def main():
    """Generate man, HTML, and RST docs for the configured conda commands."""
    # Core commands are intentionally empty for now; only conda-build's
    # command set is documented.
    core_commands = []

    # let's just hard-code this for now
    # build_commands = ()
    build_commands = [
        'build',
        'convert',
        'develop',
        'index',
        'inspect',
        'inspect channels',
        'inspect linkages',
        'inspect objects',
        'metapackage',
        'render',
        'skeleton',
        'skeleton cpan',
        'skeleton cran',
        'skeleton luarocks',
        'skeleton pypi',
    ]

    # CLI args override the default command list.
    commands = sys.argv[1:] or core_commands + build_commands

    def gen_command(command):
        generate_man(command)
        generate_html(command)

    with ThreadPoolExecutor(10) as executor:
        # list() is needed to force exceptions to be raised
        list(executor.map(gen_command, commands))

    # RST wrappers only make sense for the commands we actually generated.
    for command in [c for c in build_commands if c in commands]:
        write_rst(command)
if __name__ == '__main__':
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| |
"""
Loads functions that are mixed in to the standard library. E.g. builtins are
written in C (binaries), but my autocompletion only understands Python code. By
mixing in Python code, the autocompletion should work much better for builtins.
"""
import os
import inspect
import types
from jedi._compatibility import is_py3, builtins, unicode, is_py34
from jedi.parser import ParserWithRecovery, load_grammar
from jedi.parser import tree as pt
# Cache of parsed fake modules keyed by module name; None means "no fake
# source exists for this module" so we don't retry the file lookup.
modules = {}

# Type of C-level method descriptors such as str.replace.
MethodDescriptorType = type(str.replace)
# These are not considered classes and access is granted even though they have
# a __class__ attribute.
NOT_CLASS_TYPES = (
    types.BuiltinFunctionType,
    types.CodeType,
    types.FrameType,
    types.FunctionType,
    types.GeneratorType,
    types.GetSetDescriptorType,
    types.LambdaType,
    types.MemberDescriptorType,
    types.MethodType,
    types.ModuleType,
    types.TracebackType,
    MethodDescriptorType
)

# Some of these type objects only exist on newer interpreters.
if is_py3:
    NOT_CLASS_TYPES += (
        types.MappingProxyType,
        types.SimpleNamespace
    )
if is_py34:
    NOT_CLASS_TYPES += (types.DynamicClassAttribute,)
class FakeDoesNotExist(Exception):
    """Raised when no faked (Python-source) substitute exists for an object."""
    pass
def _load_faked_module(module):
    """Load and cache the parsed fake (.pym) source for `module`.

    Returns the parsed tree module, or None when no fake file exists.
    """
    module_name = module.__name__
    # Normalize the Python 2 name so both versions share fake/builtins.pym.
    if module_name == '__builtin__' and not is_py3:
        module_name = 'builtins'

    try:
        return modules[module_name]
    except KeyError:
        path = os.path.dirname(os.path.abspath(__file__))
        try:
            with open(os.path.join(path, 'fake', module_name) + '.pym') as f:
                source = f.read()
        except IOError:
            # Cache the miss so the file lookup isn't repeated.
            modules[module_name] = None
            return
        grammar = load_grammar(version='3.4')
        module = ParserWithRecovery(grammar, unicode(source), module_name).module
        modules[module_name] = module

        if module_name == 'builtins' and not is_py3:
            # There are two implementations of `open` for either python 2/3.
            # -> Rename the python2 version (`look at fake/builtins.pym`).
            open_func = _search_scope(module, 'open')
            open_func.children[1].value = 'open_python3'
            open_func = _search_scope(module, 'open_python2')
            open_func.children[1].value = 'open'
        return module
def _search_scope(scope, obj_name):
for s in scope.subscopes:
if s.name.value == obj_name:
return s
def get_module(obj):
    """Best-effort lookup of the module an object belongs to.

    Falls back to the builtins module whenever the owning module cannot be
    determined or imported.
    """
    if inspect.ismodule(obj):
        return obj
    try:
        # Descriptors point back at their defining class via __objclass__.
        obj = obj.__objclass__
    except AttributeError:
        pass

    try:
        module_name = obj.__module__
    except AttributeError:
        # Unfortunately in some cases like `int` there's no __module__
        return builtins

    if module_name is None:
        # Happens for example in `(_ for _ in []).send.__module__`.
        return builtins

    try:
        return __import__(module_name)
    except ImportError:
        # __module__ can be something arbitrary that doesn't exist.
        return builtins
def _faked(module, obj, name):
    """Locate the fake tree node for `obj` (or `name` on `obj`).

    Returns a (node, fake_module) pair; (None, None) when nothing matches.
    """
    # Crazy underscore actions to try to escape all the internal madness.
    if module is None:
        module = get_module(obj)

    faked_mod = _load_faked_module(module)
    if faked_mod is None:
        return None, None

    # Having the module as a `parser.tree.Module`, we need to scan
    # for methods.
    if name is None:
        if inspect.isbuiltin(obj) or inspect.isclass(obj):
            return _search_scope(faked_mod, obj.__name__), faked_mod
        elif not inspect.isclass(obj):
            # object is a method or descriptor
            try:
                # Descriptors know their owning class via __objclass__.
                objclass = obj.__objclass__
            except AttributeError:
                return None, None
            else:
                cls = _search_scope(faked_mod, objclass.__name__)
                if cls is None:
                    return None, None
                return _search_scope(cls, obj.__name__), faked_mod
    else:
        if obj == module:
            # Module-level name lookup.
            return _search_scope(faked_mod, name), faked_mod
        else:
            # Class-attribute lookup: find the class, then the member.
            try:
                cls_name = obj.__name__
            except AttributeError:
                return None, None
            cls = _search_scope(faked_mod, cls_name)
            if cls is None:
                return None, None
            return _search_scope(cls, name), faked_mod
    return None, None
def memoize_faked(obj):
    """
    A typical memoize function that ignores issues with non hashable results.

    None results are deliberately not cached, and unhashable arguments
    bypass the cache entirely.
    """
    cache = obj.cache = {}

    def memoizer(*args, **kwargs):
        key = (obj, args, frozenset(kwargs.items()))
        try:
            result = cache[key]
        except TypeError:
            # Unhashable arguments: compute without caching.
            return obj(*args, **kwargs)
        except KeyError:
            result = obj(*args, **kwargs)
            if result is not None:
                # Bug fix: store the result we already computed. The original
                # called obj a second time here, doubling the work and caching
                # a potentially different object than the one returned.
                cache[key] = result
            return result
        else:
            return result
    return memoizer
@memoize_faked
def _get_faked(module, obj, name=None):
    """Return the (node, fake_module) for `obj`, with docstring injected.

    Raises FakeDoesNotExist when no fake definition can be found.
    """
    result, fake_module = _faked(module, obj, name)
    if result is None:
        # We're not interested in classes. What we want is functions.
        raise FakeDoesNotExist
    elif result.type == 'classdef':
        return result, fake_module
    else:
        # Set the docstr which was previously not set (faked modules don't
        # contain it).
        assert result.type == 'funcdef'
        doc = '"""%s"""' % obj.__doc__  # TODO need escapes.
        suite = result.children[-1]
        # Build a docstring statement node and splice it in as the first
        # statement of the function body.
        string = pt.String(doc, (0, 0), '')
        new_line = pt.Newline('\n', (0, 0))
        docstr_node = pt.Node('simple_stmt', [string, new_line])
        suite.children.insert(1, docstr_node)
        return result, fake_module
def get_faked(evaluator, module, obj, name=None, parent_context=None):
    """Public entry point: return the faked tree node for `obj`.

    Raises FakeDoesNotExist when no fake is available.
    """
    if parent_context and parent_context.tree_node is not None:
        # Try to search in already clearly defined stuff.
        found = _search_scope(parent_context.tree_node, name)
        if found is not None:
            return found
        else:
            raise FakeDoesNotExist

    faked, fake_module = _get_faked(module and module.obj, obj, name)
    if module is not None:
        # Keep the caller's module's name index in sync with the fake tree.
        module.used_names = fake_module.used_names
    return faked
def is_class_instance(obj):
    """Like inspect.* methods: True if `obj` is an instance of a real class."""
    try:
        cls = obj.__class__
    except AttributeError:
        return False
    # `is not` is the correct identity comparison against the `type`
    # singleton; the original's `!=` invoked (potentially overridden)
    # equality instead.
    return cls is not type and not issubclass(cls, NOT_CLASS_TYPES)
| |
from collections import defaultdict
from queue import Queue
from unittest.mock import patch
from redis.exceptions import ConnectionError
from CTFd.config import TestingConfig
from CTFd.utils.events import EventManager, RedisEventManager, ServerSentEvent
from tests.helpers import create_ctfd, destroy_ctfd, login_as_user, register_user
def test_event_manager_installed():
    """Test that EventManager is installed on the Flask app"""
    app = create_ctfd()
    # `is` is the correct identity check for an exact-type assertion
    # (keeps the test strict against subclasses, unlike isinstance).
    assert type(app.events_manager) is EventManager
    destroy_ctfd(app)
def test_event_manager_subscription():
    """Test that EventManager subscribing works"""
    with patch.object(Queue, "get") as mocked_get:
        saved_data = {
            "user_id": None,
            "title": "asdf",
            "content": "asdf",
            "team_id": None,
            "user": None,
            "team": None,
            "date": "2019-01-28T01:20:46.017649+00:00",
            "id": 10,
        }
        saved_event = {"type": "notification", "data": saved_data}
        mocked_get.return_value = saved_event

        manager = EventManager()
        stream = manager.subscribe()

        # The first message from a subscription is always the keep-alive ping.
        ping = next(stream)
        assert isinstance(ping, ServerSentEvent)
        assert ping.to_dict() == {"data": "", "type": "ping"}
        assert str(ping).startswith("event:ping")
        assert len(manager.clients) == 1

        # Subsequent messages carry the queued event payload.
        message = next(stream)
        assert isinstance(message, ServerSentEvent)
        assert message.to_dict() == saved_event
        assert str(message).startswith("event:notification\ndata:")
        assert len(manager.clients) == 1
def test_event_manager_publish():
    """Test that EventManager publishing to clients works"""
    payload = {
        "user_id": None,
        "title": "asdf",
        "content": "asdf",
        "team_id": None,
        "user": None,
        "team": None,
        "date": "2019-01-28T01:20:46.017649+00:00",
        "id": 10,
    }
    manager = EventManager()
    # Register a fake client: a per-channel queue map keyed by its own id.
    client = defaultdict(Queue)
    manager.clients[id(client)] = client

    manager.publish(data=payload, type="notification", channel="ctf")

    # The published event must land on the client's "ctf" channel queue.
    raw_event = manager.clients[id(client)]["ctf"].get()
    event = ServerSentEvent(**raw_event)
    assert event.data == payload
def test_event_endpoint_is_event_stream():
    """Test that the /events endpoint is text/event-stream"""
    app = create_ctfd()
    # Stub the queue so the SSE generator yields immediately instead of
    # blocking the test waiting for a real event.
    with patch.object(Queue, "get") as fake_queue:
        saved_data = {
            "user_id": None,
            "title": "asdf",
            "content": "asdf",
            "team_id": None,
            "user": None,
            "team": None,
            "date": "2019-01-28T01:20:46.017649+00:00",
            "id": 10,
        }
        saved_event = {"type": "notification", "data": saved_data}
        fake_queue.return_value = saved_event
        with app.app_context():
            register_user(app)
            with login_as_user(app) as client:
                r = client.get("/events")
                assert "text/event-stream" in r.headers["Content-Type"]
    destroy_ctfd(app)
def test_redis_event_manager_installed():
    """Test that RedisEventManager is installed on the Flask app"""

    # Point cache + events at a dedicated redis DB so tests don't collide.
    class RedisConfig(TestingConfig):
        REDIS_URL = "redis://localhost:6379/1"
        CACHE_REDIS_URL = "redis://localhost:6379/1"
        CACHE_TYPE = "redis"

    try:
        app = create_ctfd(config=RedisConfig)
    except ConnectionError:
        # Redis not available in this environment; treat as a soft skip.
        print("Failed to connect to redis. Skipping test.")
    else:
        with app.app_context():
            assert isinstance(app.events_manager, RedisEventManager)
        destroy_ctfd(app)
def test_redis_event_manager_subscription():
    """Test that RedisEventManager subscribing works."""

    # Dedicated redis DB for this test (see installed test for pattern).
    class RedisConfig(TestingConfig):
        REDIS_URL = "redis://localhost:6379/2"
        CACHE_REDIS_URL = "redis://localhost:6379/2"
        CACHE_TYPE = "redis"

    try:
        app = create_ctfd(config=RedisConfig)
    except ConnectionError:
        # Redis not available in this environment; treat as a soft skip.
        print("Failed to connect to redis. Skipping test.")
    else:
        with app.app_context():
            saved_data = {
                "user_id": None,
                "title": "asdf",
                "content": "asdf",
                "team_id": None,
                "user": None,
                "team": None,
                "date": "2019-01-28T01:20:46.017649+00:00",
                "id": 10,
            }
            saved_event = {"type": "notification", "data": saved_data}

            # Stub the queue so subscribe() yields without a real publisher.
            with patch.object(Queue, "get") as fake_queue:
                fake_queue.return_value = saved_event
                event_manager = RedisEventManager()
                events = event_manager.subscribe()
                # First message is always the keep-alive ping.
                message = next(events)
                assert isinstance(message, ServerSentEvent)
                assert message.to_dict() == {"data": "", "type": "ping"}
                assert message.__str__().startswith("event:ping")
                # Then the stubbed notification payload.
                message = next(events)
                assert isinstance(message, ServerSentEvent)
                assert message.to_dict() == saved_event
                assert message.__str__().startswith("event:notification\ndata:")
        destroy_ctfd(app)
def test_redis_event_manager_publish():
    """Test that RedisEventManager publishing to clients works."""

    # Dedicated redis DB for this test (see installed test for pattern).
    class RedisConfig(TestingConfig):
        REDIS_URL = "redis://localhost:6379/3"
        CACHE_REDIS_URL = "redis://localhost:6379/3"
        CACHE_TYPE = "redis"

    try:
        app = create_ctfd(config=RedisConfig)
    except ConnectionError:
        # Redis not available in this environment; treat as a soft skip.
        print("Failed to connect to redis. Skipping test.")
    else:
        with app.app_context():
            saved_data = {
                "user_id": None,
                "title": "asdf",
                "content": "asdf",
                "team_id": None,
                "user": None,
                "team": None,
                "date": "2019-01-28T01:20:46.017649+00:00",
                "id": 10,
            }
            # Smoke test only: publish must not raise. There is no subscriber
            # asserting receipt here (see the subscription test).
            event_manager = RedisEventManager()
            event_manager.publish(data=saved_data, type="notification", channel="ctf")
        destroy_ctfd(app)
def test_redis_event_manager_listen():
    """Test that RedisEventManager listening pubsub works."""
    # This test is not currently working properly
    # This test is sort of incomplete b/c we aren't also subscribing
    # I wasn't able to get listening and subscribing to work at the same time
    # But the code does work under gunicorn and serve.py
    try:
        # import importlib
        # from gevent.monkey import patch_time, patch_socket
        # from gevent import Timeout
        # patch_time()
        # patch_socket()

        # Dedicated redis DB for this test (see installed test for pattern).
        class RedisConfig(TestingConfig):
            REDIS_URL = "redis://localhost:6379/4"
            CACHE_REDIS_URL = "redis://localhost:6379/4"
            CACHE_TYPE = "redis"

        try:
            app = create_ctfd(config=RedisConfig)
        except ConnectionError:
            # Redis not available in this environment; treat as a soft skip.
            print("Failed to connect to redis. Skipping test.")
        else:
            with app.app_context():
                # saved_event = {
                #     "data": {
                #         "team_id": None,
                #         "user_id": None,
                #         "content": "asdf",
                #         "title": "asdf",
                #         "id": 1,
                #         "team": None,
                #         "user": None,
                #         "date": "2020-08-31T23:57:27.193081+00:00",
                #         "type": "toast",
                #         "sound": None,
                #     },
                #     "type": "notification",
                # }
                event_manager = RedisEventManager()

                # def disable_retry(f, *args, **kwargs):
                #     return f()

                # with patch("tenacity.retry", side_effect=disable_retry):
                #     with Timeout(10):
                #         event_manager.listen()
                # Smoke test only: listen() must not raise.
                event_manager.listen()

                # event_manager.publish(
                #     data=saved_event["data"], type="notification", channel="ctf"
                # )
            destroy_ctfd(app)
    finally:
        pass
        # import socket
        # import time
        # importlib.reload(socket)
        # importlib.reload(time)
| |
"""
Commands that are available from the connect screen.
"""
import re
import datetime
from codecs import lookup as codecs_lookup
from django.conf import settings
from evennia.comms.models import ChannelDB
from evennia.server.sessionhandler import SESSIONS
from evennia.utils import class_from_module, create, logger, utils, gametime
from evennia.commands.cmdhandler import CMD_LOGINSTART
# Base class all unconnected commands inherit from, resolved from settings.
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)

# limit symbol import for API
__all__ = (
    "CmdUnconnectedConnect",
    "CmdUnconnectedCreate",
    "CmdUnconnectedQuit",
    "CmdUnconnectedLook",
    "CmdUnconnectedHelp",
)

# Cached settings lookups used by the commands below.
MULTISESSION_MODE = settings.MULTISESSION_MODE
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
def create_guest_account(session):
    """
    Creates a guest account/character for this session, if one is available.

    Args:
        session (Session): the session which will use the guest account/character.

    Returns:
        GUEST_ENABLED (boolean), account (Account):
            the boolean is whether guest accounts are enabled at all.
            the Account which was created from an available guest name.
    """
    guest_enabled = settings.GUEST_ENABLED
    ip = session.address

    # Resolve the configured guest typeclass; authenticate() handles its
    # own throttling.
    Guest = class_from_module(settings.BASE_GUEST_TYPECLASS)
    account, errors = Guest.authenticate(ip=ip)

    if not account:
        # Report why the guest login failed back to the session.
        session.msg("|R%s|n" % "\n".join(errors))
        return guest_enabled, None
    return guest_enabled, account
def create_normal_account(session, name, password):
    """
    Creates an account with the given name and password.

    Args:
        session (Session): the session which is requesting to create an account.
        name (str): the name that the account wants to use for login.
        password (str): the password desired by this account, for login.

    Returns:
        account (Account): the account which was created from the name and password.
    """
    # Resolve the configured account typeclass; authenticate() handles all
    # its own throttling.
    Account = class_from_module(settings.BASE_ACCOUNT_TYPECLASS)
    account, errors = Account.authenticate(
        username=name, password=password, ip=session.address, session=session
    )

    if account:
        return account
    # No accountname or password match - report the errors to the session.
    session.msg("|R%s|n" % "\n".join(errors))
    return None
class CmdUnconnectedConnect(COMMAND_DEFAULT_CLASS):
    """
    connect to the game
    Usage (at login screen):
        connect accountname password
        connect "account name" "pass word"
    Use the create command to first create an account before logging in.
    If you have spaces in your name, enclose it in double quotes.
    """

    key = "connect"
    aliases = ["conn", "con", "co"]
    locks = "cmd:all()"  # not really needed
    arg_regex = r"\s.*?|$"

    def func(self):
        """
        Uses the Django admin api. Note that unlogged-in commands
        have a unique position in that their func() receives
        a session object instead of a source_object like all
        other types of logged-in commands (this is because
        there is no object yet before the account has logged in)
        """
        session = self.caller
        address = session.address
        args = self.args
        # extract double quote parts
        parts = [part.strip() for part in re.split(r"\"", args) if part.strip()]
        if len(parts) == 1:
            # this was (hopefully) due to no double quotes being found, or a guest login
            parts = parts[0].split(None, 1)
            # Guest login
            if len(parts) == 1 and parts[0].lower() == "guest":
                # Get Guest typeclass
                Guest = class_from_module(settings.BASE_GUEST_TYPECLASS)
                # authenticate() picks an available guest name and throttles.
                account, errors = Guest.authenticate(ip=address)
                if account:
                    session.sessionhandler.login(session, account)
                    return
                else:
                    session.msg("|R%s|n" % "\n".join(errors))
                    return

        if len(parts) != 2:
            session.msg("\n\r Usage (without <>): connect <name> <password>")
            return

        # Get account class
        Account = class_from_module(settings.BASE_ACCOUNT_TYPECLASS)
        name, password = parts
        # authenticate() checks credentials and does its own throttling.
        account, errors = Account.authenticate(
            username=name, password=password, ip=address, session=session
        )
        if account:
            session.sessionhandler.login(session, account)
        else:
            session.msg("|R%s|n" % "\n".join(errors))
class CmdUnconnectedCreate(COMMAND_DEFAULT_CLASS):
    """
    create a new account account
    Usage (at login screen):
        create <accountname> <password>
        create "account name" "pass word"
    This creates a new account account.
    If you have spaces in your name, enclose it in double quotes.
    """

    key = "create"
    aliases = ["cre", "cr"]
    locks = "cmd:all()"
    arg_regex = r"\s.*?|$"

    def func(self):
        """Do checks and create account"""
        session = self.caller
        args = self.args.strip()
        address = session.address

        # Get account class
        Account = class_from_module(settings.BASE_ACCOUNT_TYPECLASS)

        # extract double quoted parts
        parts = [part.strip() for part in re.split(r"\"", args) if part.strip()]
        if len(parts) == 1:
            # this was (hopefully) due to no quotes being found
            parts = parts[0].split(None, 1)
        if len(parts) != 2:
            string = (
                "\n Usage (without <>): create <name> <password>"
                "\nIf <name> or <password> contains spaces, enclose it in double quotes."
            )
            session.msg(string)
            return

        username, password = parts

        # everything's ok. Create the new account account.
        # Account.create() validates the name/password and throttles.
        account, errors = Account.create(
            username=username, password=password, ip=address, session=session
        )
        if account:
            # tell the caller everything went well.
            string = "A new account '%s' was created. Welcome!"
            if " " in username:
                # Names with spaces need quoting in the connect command.
                string += (
                    "\n\nYou can now log in with the command 'connect \"%s\" <your password>'."
                )
            else:
                string += "\n\nYou can now log with the command 'connect %s <your password>'."
            session.msg(string % (username, username))
        else:
            session.msg("|R%s|n" % "\n".join(errors))
class CmdUnconnectedQuit(COMMAND_DEFAULT_CLASS):
    """
    quit when in unlogged-in state
    Usage:
        quit
    We maintain a different version of the quit command
    here for unconnected accounts for the sake of simplicity. The logged in
    version is a bit more complicated.
    """

    key = "quit"
    aliases = ["q", "qu"]
    locks = "cmd:all()"

    def func(self):
        """Simply close the connection."""
        # For unconnected commands, self.caller is the session itself.
        unconnected_session = self.caller
        unconnected_session.sessionhandler.disconnect(
            unconnected_session, "Good bye! Disconnecting."
        )
class CmdUnconnectedLook(COMMAND_DEFAULT_CLASS):
    """
    look when in unlogged-in state
    Usage:
        look
    This is an unconnected version of the look command for simplicity.
    This is called by the server and kicks everything in gear.
    All it does is display the connect screen.
    """

    key = CMD_LOGINSTART
    aliases = ["look", "l"]
    locks = "cmd:all()"

    def func(self):
        """Show the connect screen."""
        # Prefer a callable `connection_screen()` from the screen module;
        # otherwise fall back to a random string defined in that module.
        callables = utils.callables_from_module(CONNECTION_SCREEN_MODULE)
        screen_callable = callables.get("connection_screen")
        if screen_callable:
            connection_screen = screen_callable()
        else:
            connection_screen = utils.random_string_from_module(CONNECTION_SCREEN_MODULE)
        if not connection_screen:
            connection_screen = "No connection screen found. Please contact an admin."
        self.caller.msg(connection_screen)
class CmdUnconnectedHelp(COMMAND_DEFAULT_CLASS):
    """
    get help when in unconnected-in state
    Usage:
        help
    This is an unconnected version of the help command,
    for simplicity. It shows a pane of info.
    """

    key = "help"
    aliases = ["h", "?"]
    locks = "cmd:all()"

    def func(self):
        """Shows help"""
        # This text is shown verbatim to not-yet-logged-in users.
        string = """
You are not yet logged into the game. Commands available at this point:
|wcreate|n - create a new account
|wconnect|n - connect with an existing account
|wlook|n - re-show the connection screen
|whelp|n - show this help
|wencoding|n - change the text encoding to match your client
|wscreenreader|n - make the server more suitable for use with screen readers
|wquit|n - abort the connection
First create an account e.g. with |wcreate Anna c67jHL8p|n
Next you can connect to the game: |wconnect Anna c67jHL8p|n
You can use the |wlook|n command if you want to see the connect screen again.
"""
        if settings.STAFF_CONTACT_EMAIL:
            string += "For support, please contact: %s" % settings.STAFF_CONTACT_EMAIL
        self.caller.msg(string)
class CmdUnconnectedEncoding(COMMAND_DEFAULT_CLASS):
    """
    set which text encoding to use in unconnected-in state
    Usage:
        encoding/switches [<encoding>]
    Switches:
        clear - clear your custom encoding
    This sets the text encoding for communicating with Evennia. This is mostly
    an issue only if you want to use non-ASCII characters (i.e. letters/symbols
    not found in English). If you see that your characters look strange (or you
    get encoding errors), you should use this command to set the server
    encoding to be the same used in your client program.
    Common encodings are utf-8 (default), latin-1, ISO-8859-1 etc.
    If you don't submit an encoding, the current encoding will be displayed
    instead.
    """

    key = "encoding"
    aliases = "encode"
    locks = "cmd:all()"

    def func(self):
        """
        Sets the encoding.
        """
        if self.session is None:
            return

        # Whether the portal needs to be re-synced after a flag change.
        sync = False
        if "clear" in self.switches:
            # remove customization
            old_encoding = self.session.protocol_flags.get("ENCODING", None)
            if old_encoding:
                string = "Your custom text encoding ('%s') was cleared." % old_encoding
            else:
                string = "No custom encoding was set."
            self.session.protocol_flags["ENCODING"] = "utf-8"
            sync = True
        elif not self.args:
            # just list the encodings supported
            pencoding = self.session.protocol_flags.get("ENCODING", None)
            string = ""
            if pencoding:
                string += (
                    "Default encoding: |g%s|n (change with |wencoding <encoding>|n)" % pencoding
                )
            encodings = settings.ENCODINGS
            if encodings:
                string += (
                    "\nServer's alternative encodings (tested in this order):\n |g%s|n"
                    % ", ".join(encodings)
                )
            if not string:
                string = "No encodings found."
        else:
            # change encoding
            old_encoding = self.session.protocol_flags.get("ENCODING", None)
            encoding = self.args
            try:
                # Validate the name against Python's codec registry.
                codecs_lookup(encoding)
            except LookupError:
                string = (
                    "|rThe encoding '|w%s|r' is invalid. Keeping the previous encoding '|w%s|r'.|n"
                    % (encoding, old_encoding)
                )
            else:
                self.session.protocol_flags["ENCODING"] = encoding
                string = "Your custom text encoding was changed from '|w%s|n' to '|w%s|n'." % (
                    old_encoding,
                    encoding,
                )
                sync = True
        if sync:
            # Push the updated protocol flags out to the portal process.
            self.session.sessionhandler.session_portal_sync(self.session)
        self.caller.msg(string.strip())
class CmdUnconnectedScreenreader(COMMAND_DEFAULT_CLASS):
    """
    Activate screenreader mode.
    Usage:
    screenreader
    Used to flip screenreader mode on and off before logging in (when
    logged in, use option screenreader on).
    """

    key = "screenreader"

    def func(self):
        """Flips screenreader setting."""
        # Toggle the per-session flag, defaulting to off when unset.
        flags = self.session.protocol_flags
        enabled = not flags.get("SCREENREADER", False)
        flags["SCREENREADER"] = enabled
        self.caller.msg("Screenreader mode turned |w%s|n." % ("on" if enabled else "off"))
        # Push the changed flag to the Portal side.
        self.session.sessionhandler.session_portal_sync(self.session)
class CmdUnconnectedInfo(COMMAND_DEFAULT_CLASS):
    """
    Provides MUDINFO output, so that Evennia games can be added to Mudconnector
    and Mudstats. Sadly, the MUDINFO specification seems to have dropped off the
    face of the net, but it is still used by some crawlers. This implementation
    was created by looking at the MUDINFO implementation in MUX2, TinyMUSH, Rhost,
    and PennMUSH.
    """

    key = "info"
    locks = "cmd:all()"

    def func(self):
        """Reply with a MUDINFO-formatted block of server facts."""
        uptime = datetime.datetime.fromtimestamp(gametime.SERVER_START_TIME).ctime()
        payload = (
            "## BEGIN INFO 1.1\nName: %s\nUptime: %s\nConnected: %d\nVersion: Evennia %s\n## END INFO"
            % (
                settings.SERVERNAME,
                uptime,
                SESSIONS.account_count(),
                utils.get_evennia_version(),
            )
        )
        self.caller.msg(payload)
def _create_account(session, accountname, password, permissions, typeclass=None, email=None):
    """Create and return a new Account of the given typeclass.

    On failure the connecting session is informed, a traceback is logged
    and False is returned. On success the new account is flagged for
    first-login handling and joined to the default public channel.
    """
    try:
        new_account = create.create_account(
            accountname, email, password, permissions=permissions, typeclass=typeclass
        )
    except Exception as e:
        session.msg(
            "There was an error creating the Account:\n%s\n If this problem persists, contact an admin."
            % e
        )
        logger.log_trace()
        return False
    # Flag the account so the engine runs the first-login hooks for it
    # during the upcoming login.
    new_account.db.FIRST_LOGIN = True
    # Subscribe the newcomer to the default public channel; failure is
    # logged but does not abort account creation.
    pchannel = ChannelDB.objects.get_channel(settings.DEFAULT_CHANNELS[0]["key"])
    joined = bool(pchannel) and pchannel.connect(new_account)
    if not joined:
        logger.log_err("New account '%s' could not connect to public channel!" % new_account.key)
    return new_account
def _create_character(session, new_account, typeclass, home, permissions):
"""
Helper function, creates a character based on an account's name.
This is meant for Guest and MULTISESSION_MODE < 2 situations.
"""
try:
new_character = create.create_object(
typeclass, key=new_account.key, home=home, permissions=permissions
)
# set playable character list
new_account.db._playable_characters.append(new_character)
# allow only the character itself and the account to puppet this character (and Developers).
new_character.locks.add(
"puppet:id(%i) or pid(%i) or perm(Developer) or pperm(Developer)"
% (new_character.id, new_account.id)
)
# If no description is set, set a default description
if not new_character.db.desc:
new_character.db.desc = "This is a character."
# We need to set this to have ic auto-connect to this character
new_account.db._last_puppet = new_character
except Exception as e:
session.msg(
"There was an error creating the Character:\n%s\n If this problem persists, contact an admin."
% e
)
logger.log_trace()
| |
from sqlalchemy import Column, Integer, String, Date, Boolean, PickleType
from steamweb.database import Base
from steamweb.utils import deserialize_order, serialize_order
class Player(Base):
    """One participant in a game: resources, carrier tiers and bid state."""

    __tablename__ = 'players'
    id = Column(Integer, primary_key=True)
    game = Column(Integer())  # id of the owning Game row
    name = Column(String(50))
    cash = Column(Integer())
    water = Column(Integer())
    ore = Column(Integer())
    energy = Column(Integer())
    quartz = Column(Integer())
    # Carrier tier owned per resource; capacity is looked up via get_carrier().
    water_carrier = Column(Integer())
    energy_carrier = Column(Integer())
    ore_carrier = Column(Integer())
    quartz_carrier = Column(Integer())
    permits = Column(Integer())
    quarters = Column(Integer())
    specialist = Column(Integer())  # id of the owned Specialist row, if any
    active_tank_phase = Column(Boolean())
    color = Column(String(50))
    bot = Column(PickleType())  # pickled bot state; stripped from as_dict()

    def __init__(self):
        self.name = None
        self.game = None
        self.cash = 0
        self.water = 0
        self.ore = 0
        self.energy = 0
        self.quartz = 0
        self.permits = 0
        # Bug fix: ``quarters`` is the mapped column but was never
        # initialized; the old code only set ``lux_quarters``, which is not
        # a column and was silently dropped on persist. ``lux_quarters`` is
        # kept in case external code still reads the attribute.
        self.quarters = 0
        self.lux_quarters = 0
        self.active_tank_phase = False

    def reset(self):
        """Clear per-round state (tank phase flag and specialist)."""
        self.active_tank_phase = False
        self.specialist = None

    def __repr__(self):
        return '<Player {0} specialist: {1}>'.format(self.name, self.specialist)

    def determine_bid(self):
        """Return this player's current bid: -1 if passed, else highest bid (0 if none)."""
        player_bids = Bid.query.filter(Bid.player == self.id).all()
        if not self.is_in_order():  # passed
            return -1
        if not player_bids:
            return 0
        return max(player_bids, key=lambda x: x.bid).bid

    def is_in_order(self):
        """Return True if this player is still an active bidder in the current tender."""
        game = Game.query.filter(Game.id == self.game).first()
        tender = Tender.query.filter(Tender.id == game.active_tender).first()
        if not tender:
            return True  # the only case when there is no tender is when auction is starting, all players are still in game
        return self.id in tender.get_order()

    def as_dict(self):
        """Serialize to a plain dict for the client, with derived fields added."""
        base = {c.name: getattr(self, c.name) for c in self.__table__.columns}
        del base['bot']  # pickled bot state is internal only
        base['bid'] = self.determine_bid()
        spec = Specialist.query.filter(Specialist.id == self.specialist).first()
        if spec:
            base['specialist'] = spec.as_dict()
        base["water_capacity"] = self.get_water_capacity()
        base["energy_capacity"] = self.get_energy_capacity()
        base["ore_capacity"] = self.get_ore_capacity()
        base["quartz_capacity"] = self.get_quartz_capacity()
        return base

    def as_dict_rival(self):
        """Provides extra convenience fields and does not include cash"""
        base = self.as_dict()
        del base['cash']  # only cash is private
        return base

    def get_water_capacity(self):
        """Return storage capacity granted by the owned water carrier tier."""
        carrier = get_carrier(self.game, 'water', self.water_carrier)
        return carrier.capacity

    def get_energy_capacity(self):
        """Return storage capacity granted by the owned energy carrier tier."""
        carrier = get_carrier(self.game, 'energy', self.energy_carrier)
        return carrier.capacity

    def get_ore_capacity(self):
        """Return storage capacity granted by the owned ore carrier tier."""
        carrier = get_carrier(self.game, 'ore', self.ore_carrier)
        return carrier.capacity

    def get_quartz_capacity(self):
        """Return storage capacity granted by the owned quartz carrier tier."""
        carrier = get_carrier(self.game, 'quartz', self.quartz_carrier)
        return carrier.capacity
def get_carrier(gameid, resource_name, tier):
    """Return the single Carrier row matching (game, resource, tier)."""
    matches = [
        row
        for row in Carrier.query.filter(Carrier.game == gameid).all()
        if row.resource == resource_name and row.tier == tier
    ]
    # Exactly one carrier per (game, resource, tier) is expected.
    assert len(matches) == 1
    return matches[0]
class Game(Base):
    """A single game session: phase, turn order, map size and bonuses."""

    __tablename__ = 'games'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    created_at = Column(Date())
    active_player = Column(Integer())  # id of the Player whose turn it is
    active_tender = Column(Integer())  # id of the current Tender (auction), if any
    mansion_quantity = Column(Integer())
    tank_index = Column(Integer())
    max_tank_index = Column(Integer())
    turn = Column(Integer())
    last_turn = Column(Integer())
    phase = Column(String(50))  # current game phase, e.g. 'game_starting'
    order = Column(String(900))  # order in which players do their turns
    # encoded as comma separated IDs of players
    map_width = Column(Integer())
    map_height = Column(Integer())
    bonus_water = Column(Boolean())
    bonus_ore = Column(Boolean())
    bonus_energy = Column(Boolean())
    bonus_quartz = Column(Boolean())
    required_players = Column(Integer())

    def get_order(self):
        """Return the turn order as a list of player ids."""
        return deserialize_order(self.order)

    def set_order(self, new_order_value):
        """Store a new turn order (serialized to the comma-separated column)."""
        self.order = serialize_order(new_order_value)

    def __init__(self):
        self.phase = 'game_starting'
        self.bonus_energy = False
        self.bonus_water = False
        self.bonus_ore = False
        self.bonus_quartz = False

    def __repr__(self):
        return '<Game %r>' % (self.name)

    def as_dict(self):
        """Serialize the full game state (players, specialists, tiles, ledgers, bid)."""
        base = {c.name: getattr(self, c.name) for c in self.__table__.columns}
        players = Player.query.filter(Player.game == self.id).all()
        # Rival view: player cash is private and omitted.
        base['players'] = [player.as_dict_rival() for player in players]
        base['takenSpots'] = len([player for player in players if player.name])
        specialists = self.get_specialists()
        base['specialists'] = specialists
        base['bonuses'] = self.get_bonuses()
        base['tiles'] = self._get_tiles()
        base['ledgers'] = self._get_ledgers()
        base['bid'] = get_highest_bid(self.active_tender)
        return base

    def _get_ledgers(self):
        """Return all Ledger rows of this game as dicts."""
        ledgers = Ledger.query.filter(Ledger.game == self.id).all()
        ledgers = [l.as_dict() for l in ledgers]
        return ledgers

    def _get_tiles(self):
        """Return all Tile rows of this game as dicts."""
        tiles = Tile.query.filter(Tile.game == self.id)
        return [tile.as_dict() for tile in tiles]

    def get_specialists(self):
        """Return all Specialist rows of this game as dicts."""
        specs = Specialist.query.filter(Specialist.game == self.id).all()
        return [spec.as_dict() for spec in specs]

    def get_bonuses(self):
        """Return the names of the resource bonuses enabled for this game."""
        bonuses = []
        if self.bonus_water:
            bonuses.append('water')
        if self.bonus_ore:
            bonuses.append('ore')
        if self.bonus_quartz:
            bonuses.append('quartz')
        if self.bonus_energy:
            bonuses.append('energy')
        return bonuses
class Tile(Base):
    """One map tile: position, owner, extractors and per-turn flags."""

    __tablename__ = 'tiles'

    # NOTE(review): this __init__ is declared before the columns and sets
    # only the water extractor; other defaults come from the database.
    def __init__(self):
        self.water_extractor = True  # every tile can extract water

    id = Column(Integer, primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    owner = Column(Integer())  # id of the owning Player, if claimed
    x = Column(Integer())
    y = Column(Integer())
    tank = Column(Boolean())
    supercharger = Column(Boolean())
    water_extractor = Column(Boolean())
    energy_extractor = Column(Boolean())
    ore_extractor = Column(Boolean())
    quartz_extractor = Column(Boolean())
    on_river = Column(Boolean())
    airship_bonus = Column(Boolean())
    neutral = Column(Boolean())
    alternative = Column(Boolean())  # can be picked during alternative_claim
    auction_target = Column(Boolean())  # is currently a target of a ventureer auction
    selected = Column(Boolean())  # if the tile is currently selected by the active player
    used = Column(Boolean())  # if the tile has already been used given turn
    color = Column(String(50))

    def __repr__(self):
        return '<Tile X:{} Y:{} Owner:{}>'.format(self.x, self.y, self.owner)

    def reset(self):
        """Clear per-round flags (airship bonus, alternative claim, auction target)."""
        self.airship_bonus = False
        self.alternative = False
        self.auction_target = False

    def as_dict(self):
        """Serialize all mapped columns to a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Specialist(Base):
    """A specialist card a player can own; role flags mark its abilities."""

    __tablename__ = 'specialists'
    id = Column(Integer(), primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    player = Column(Integer())  # id of the owning Player, if taken
    priority = Column(Integer())
    name = Column(String(50))
    # Role flags; a specialist may combine several of them.
    ventureer = Column(Boolean())
    engineer = Column(Boolean())
    banker = Column(Boolean())
    captain = Column(Boolean())
    description = Column(String(256))

    def __repr__(self):
        return '<Specialist name:{} priority:{} ventureer:{} engineer: {} banker:{} captain:{} belongs to:{}>'.format(
            self.name,
            self.priority,
            self.ventureer,
            self.engineer,
            self.banker,
            self.captain,
            self.player)

    def as_dict(self):
        """Serialize all mapped columns to a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Carrier(Base):
    """Per-game carrier tier table: maps (resource, tier) to storage capacity."""

    __tablename__ = 'carriers'
    id = Column(Integer, primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    resource = Column(String(50))  # e.g. 'water', 'energy', 'ore', 'quartz'
    tier = Column(Integer())
    capacity = Column(Integer())
class Ledger(Base):
    """
    Simple dict implementation for purpose of mapping price indexes to prices
    and price change indexes to price changes.
    For prices of things that change throughout the game.
    """

    __tablename__ = 'ledgers'
    id = Column(Integer, primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    name = Column(String(50))  # which ledger this row belongs to
    index = Column(Integer())  # the "key" of the mapping
    value = Column(Integer())  # the "value" of the mapping

    def as_dict(self):
        """Serialize all mapped columns to a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Resource(Base):
    """Per-game market state for one resource: supply and price position."""

    __tablename__ = 'resources'
    id = Column(Integer, primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    name = Column(String(50))
    max_supply = Column(Integer)
    max_price_index = Column(Integer)
    price_index = Column(Integer)  # index on price ledger
    current_supply = Column(Integer)

    def as_dict(self):
        """Serialize all mapped columns to a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Price(Base):
    """
    Price of things that do not change in price throughout the game
    """

    __tablename__ = 'prices'
    id = Column(Integer, primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    item = Column(String(50))  # what is being priced
    # Cost components of the item, one per payable resource.
    cash = Column(Integer())
    ore = Column(Integer())
    water = Column(Integer())
    energy = Column(Integer())
    quartz = Column(Integer())

    def __repr__(self):
        return "<Price Id: {} game: {} item {}>".format(self.id, self.game, self.item)
class Tender(Base):
    """An auction round; ``order`` tracks the players still bidding."""

    __tablename__ = 'tenders'
    id = Column(Integer, primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    order = Column(String(900))
    # order in which players bid, contains only active bidders

    def get_order(self):
        """Return the bidding order as a list of player ids."""
        return deserialize_order(self.order)

    def set_order(self, new_order_value):
        """Store a new bidding order (serialized to the comma-separated column)."""
        self.order = serialize_order(new_order_value)

    def __repr__(self):
        # NOTE(review): the "<highest: ...>" label prints self.id, not a
        # highest bid — possibly a leftover from an older repr; verify.
        return "<highest: {} in game: {}>".format(self.id, self.game)

    def as_dict(self):
        """Serialize all mapped columns to a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Bid(Base):
    """A single cash bid by a player within a tender."""

    __tablename__ = 'bids'
    id = Column(Integer, primary_key=True)
    game = Column(Integer)  # id of the owning Game row
    bid = Column(Integer)  # value in cash
    player = Column(Integer)  # id of the bidding Player
    tender = Column(Integer)  # id of the Tender this bid belongs to

    def __repr__(self):
        return "<Bid {} by player: {} in game {}>".format(self.bid, self.player, self.game)

    def as_dict(self):
        """Serialize all mapped columns to a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def get_highest_bid(tender_id):
    """Return the highest bid value placed in the given tender.

    Returns 0 when no bids exist yet (or the tender id is unknown).
    """
    all_bids = Bid.query.filter(Bid.tender == tender_id).all()
    if not all_bids:
        return 0
    # max() with a key is O(n); the previous sort-then-take-last was
    # O(n log n) for the same result.
    return max(all_bids, key=lambda b: b.bid).bid
| |
import time
from binaryninja.binaryview import StructuredDataView
import binaryninjaui
from binaryninjaui import ViewFrame, UIContext
from binaryninja.enums import ThemeColor
from PySide6.QtWidgets import QWidget, QLabel, QGridLayout
from PySide6.QtGui import QPalette
class ClickableLabel(QLabel):
    """Monospace label that invokes a stored callback on any mouse press."""

    def __init__(self, text, color, func):
        super(ClickableLabel, self).__init__(text)
        # Tint the label text with the requested theme color.
        palette = QPalette(self.palette())
        palette.setColor(QPalette.WindowText, color)
        self.setPalette(palette)
        self.setFont(binaryninjaui.getMonospaceFont(self))
        self.func = func

    def mousePressEvent(self, event):
        """Qt hook: forward the click to the stored callback."""
        self.func()
class ClickableAddressLabel(ClickableLabel):
    """Address-colored label; clicking navigates the linear view to the address."""

    def __init__(self, text):
        # ``text`` must be a parseable integer literal (base auto-detected).
        super(ClickableAddressLabel, self).__init__(text, binaryninjaui.getThemeColor(ThemeColor.AddressColor), self.clickEvent)
        self.address = int(text, 0)

    def clickEvent(self):
        """Navigate the containing view frame's Linear view to self.address."""
        viewFrame = ViewFrame.viewFrameForWidget(self)
        viewFrame.navigate("Linear:" + viewFrame.getCurrentDataType(), self.address)
class ClickableCodeLabel(ClickableLabel):
    """Code-colored label; clicking navigates the graph view to the address."""

    def __init__(self, text):
        # ``text`` must be a parseable integer literal (base auto-detected).
        super(ClickableCodeLabel, self).__init__(text, binaryninjaui.getThemeColor(ThemeColor.CodeSymbolColor), self.clickEvent)
        self.address = int(text, 0)

    def clickEvent(self):
        """Navigate the containing view frame's Graph view to self.address."""
        viewFrame = ViewFrame.viewFrameForWidget(self)
        viewFrame.navigate("Graph:" + viewFrame.getCurrentDataType(), self.address)
class GenericHeaders(object):
    """Fallback header fields for binary views with no format-specific parser."""

    def __init__(self, data):
        fields = [("Type", data.view_type)]
        if data.platform is not None:
            fields.append(("Platform", data.platform.name))
        if data.is_valid_offset(data.entry_point):
            # Third tuple element marks the field as a clickable code address.
            fields.append(("Entry Point", "0x%x" % data.entry_point, "code"))
        self.fields = fields
        # Everything fits in a single display column.
        self.columns = 1
class PEHeaders(object):
    """Parses the DOS/COFF/PE optional headers of ``data`` into display fields.

    Each entry in ``self.fields`` is a (name, value[, type]) tuple where the
    optional third element marks the value as a clickable "ptr" or "code"
    address. Raises ValueError for an unrecognized optional-header magic.
    """

    def __init__(self, data):
        dos = StructuredDataView(data, "DOS_Header", data.start)
        pe_offset = data.start + int(dos.e_lfanew)
        coff = StructuredDataView(data, "COFF_Header", pe_offset)
        # Optional header magic: 0x010b = PE32, 0x020b = PE32+ (little-endian).
        pe_magic = data.read(pe_offset + len(coff), 2)
        self.fields = []
        if pe_magic == b"\x0b\x01":
            peopt = StructuredDataView(data, "PE32_Optional_Header", pe_offset + len(coff))
            self.fields.append(("Type", "PE 32-bit"))
            is64bit = False
        elif pe_magic == b"\x0b\x02":
            peopt = StructuredDataView(data, "PE64_Optional_Header", pe_offset + len(coff))
            self.fields.append(("Type", "PE 64-bit"))
            is64bit = True
        else:
            # Bug fix: previously this fell through with ``peopt``/``is64bit``
            # unbound, causing a confusing NameError further down. Fail fast
            # with a descriptive error instead.
            raise ValueError("Unsupported PE optional header magic: %r" % (pe_magic,))
        # Machine type, with the IMAGE_FILE_MACHINE_ prefix stripped.
        machine_value = int(coff.machine)
        machine_enum = data.get_type_by_name("coff_machine")
        machine_name = str(machine_value)
        for member in machine_enum.enumeration.members:
            if member.value == machine_value:
                machine_name = member.name
                if machine_name.startswith("IMAGE_FILE_MACHINE_"):
                    machine_name = machine_name[len("IMAGE_FILE_MACHINE_"):]
        self.fields.append(("Machine", machine_name))
        # Subsystem, with the IMAGE_SUBSYSTEM_ prefix stripped.
        subsys_value = int(peopt.subsystem)
        subsys_enum = data.get_type_by_name("pe_subsystem")
        subsys_name = str(subsys_value)
        for member in subsys_enum.enumeration.members:
            if member.value == subsys_value:
                subsys_name = member.name
                if subsys_name.startswith("IMAGE_SUBSYSTEM_"):
                    subsys_name = subsys_name[len("IMAGE_SUBSYSTEM_"):]
        self.fields.append(("Subsystem", subsys_name))
        self.fields.append(("Timestamp", time.strftime("%c", time.localtime(int(coff.timeDateStamp)))))
        base = int(peopt.imageBase)
        self.fields.append(("Image Base", "0x%x" % base, "ptr"))
        # RVAs below are rebased onto the image base for clickable navigation.
        entry_point = base + int(peopt.addressOfEntryPoint)
        self.fields.append(("Entry Point", "0x%x" % entry_point, "code"))
        section_align = int(peopt.sectionAlignment)
        self.fields.append(("Section Alignment", "0x%x" % section_align))
        file_align = int(peopt.fileAlignment)
        self.fields.append(("File Alignment", "0x%x" % file_align))
        checksum = int(peopt.checkSum)
        self.fields.append(("Checksum", "0x%.8x" % checksum))
        code_base = base + int(peopt.baseOfCode)
        self.fields.append(("Base of Code", "0x%x" % code_base, "ptr"))
        if not is64bit:
            # baseOfData exists only in the PE32 optional header.
            data_base = base + int(peopt.baseOfData)
            self.fields.append(("Base of Data", "0x%x" % data_base, "ptr"))
        code_size = int(peopt.sizeOfCode)
        self.fields.append(("Size of Code", "0x%x" % code_size))
        init_data_size = int(peopt.sizeOfInitializedData)
        self.fields.append(("Size of Init Data", "0x%x" % init_data_size))
        uninit_data_size = int(peopt.sizeOfUninitializedData)
        self.fields.append(("Size of Uninit Data", "0x%x" % uninit_data_size))
        header_size = int(peopt.sizeOfHeaders)
        self.fields.append(("Size of Headers", "0x%x" % header_size))
        image_size = int(peopt.sizeOfImage)
        self.fields.append(("Size of Image", "0x%x" % image_size))
        stack_commit = int(peopt.sizeOfStackCommit)
        stack_reserve = int(peopt.sizeOfStackReserve)
        self.fields.append(("Stack Size", "0x%x / 0x%x" % (stack_commit, stack_reserve)))
        heap_commit = int(peopt.sizeOfHeapCommit)
        heap_reserve = int(peopt.sizeOfHeapReserve)
        self.fields.append(("Heap Size", "0x%x / 0x%x" % (heap_commit, heap_reserve)))
        linker_major = int(peopt.majorLinkerVersion)
        linker_minor = int(peopt.minorLinkerVersion)
        self.fields.append(("Linker Version", "%d.%.2d" % (linker_major, linker_minor)))
        image_major = int(peopt.majorImageVersion)
        image_minor = int(peopt.minorImageVersion)
        self.fields.append(("Image Version", "%d.%.2d" % (image_major, image_minor)))
        os_major = int(peopt.majorOperatingSystemVersion)
        os_minor = int(peopt.minorOperatingSystemVersion)
        self.fields.append(("OS Version", "%d.%.2d" % (os_major, os_minor)))
        sub_major = int(peopt.majorSubsystemVersion)
        sub_minor = int(peopt.minorSubsystemVersion)
        self.fields.append(("Subsystem Version", "%d.%.2d" % (sub_major, sub_minor)))
        # Decode COFF characteristics bitmask into a list of flag names.
        coff_char_value = int(coff.characteristics)
        coff_char_enum = data.get_type_by_name("coff_characteristics")
        coff_char_values = []
        for member in coff_char_enum.enumeration.members:
            if (coff_char_value & member.value) != 0:
                if member.name.startswith("IMAGE_FILE_"):
                    coff_char_values.append(member.name[len("IMAGE_FILE_"):])
                else:
                    coff_char_values.append(member.name)
        if len(coff_char_values) > 0:
            self.fields.append(("COFF Characteristics", coff_char_values))
        # Decode DLL characteristics bitmask into a list of flag names.
        dll_char_value = int(peopt.dllCharacteristics)
        dll_char_enum = data.get_type_by_name("pe_dll_characteristics")
        dll_char_values = []
        for member in dll_char_enum.enumeration.members:
            if (dll_char_value & member.value) != 0:
                if member.name.startswith("IMAGE_DLLCHARACTERISTICS_"):
                    dll_char_values.append(member.name[len("IMAGE_DLLCHARACTERISTICS_"):])
                else:
                    dll_char_values.append(member.name)
        if len(dll_char_values) > 0:
            self.fields.append(("DLL Characteristics", dll_char_values))
        # Layout hints consumed by HeaderWidget.
        self.columns = 3
        self.rows_per_column = 9
class HeaderWidget(QWidget):
    """Grid widget rendering a header object's (name, value[, type]) fields.

    ``header`` provides ``fields``, ``columns`` and (when multi-column)
    ``rows_per_column``. Values typed "ptr"/"code" become clickable labels.
    """

    def __init__(self, parent, header):
        super(HeaderWidget, self).__init__(parent)
        layout = QGridLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setVerticalSpacing(1)
        row = 0
        col = 0
        for field in header.fields:
            name = field[0]
            value = field[1]
            # Optional third tuple element selects the label type.
            fieldType = field[2] if len(field) > 2 else ""
            layout.addWidget(QLabel(name + ": "), row, col * 3)
            # A list value occupies one grid row per item; a scalar is a
            # single-item case of the same layout (deduplicated from the
            # original copy-pasted branches).
            items = value if isinstance(value, list) else [value]
            for item in items:
                layout.addWidget(self._make_value_label(item, fieldType), row, col * 3 + 1)
                row += 1
            # Wrap to the next display column when this one is full.
            if (header.columns > 1) and (row >= header.rows_per_column) and ((col + 1) < header.columns):
                row = 0
                col += 1
        # Spacer columns between display columns, then stretch the tail.
        for col in range(1, header.columns):
            layout.setColumnMinimumWidth(col * 3 - 1, UIContext.getScaledWindowSize(20, 20).width())
        layout.setColumnStretch(header.columns * 3 - 1, 1)
        self.setLayout(layout)

    def _make_value_label(self, value, fieldType):
        """Build a monospace label for one value; clickable for ptr/code fields."""
        if fieldType == "ptr":
            label = ClickableAddressLabel(value)
        elif fieldType == "code":
            label = ClickableCodeLabel(value)
        else:
            label = QLabel(value)
        label.setFont(binaryninjaui.getMonospaceFont(self))
        return label
| |
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import re
import mock
import six
from osprofiler import profiler
from osprofiler.tests import test
class ProfilerGlobMethodsTestCase(test.TestCase):
    """Tests for the module-level profiler helpers: init/get/start/stop."""

    def test_get_profiler_not_inited(self):
        """get() returns None when no profiler has been initialized."""
        profiler.clean()
        self.assertIsNone(profiler.get())

    def test_get_profiler_and_init(self):
        """init() stores a profiler instance retrievable via get()."""
        p = profiler.init("secret", base_id="1", parent_id="2")
        self.assertEqual(profiler.get(), p)
        self.assertEqual(p.get_base_id(), "1")
        # NOTE(boris-42): until we make first start we don't have
        self.assertEqual(p.get_id(), "2")

    def test_start_not_inited(self):
        """start() must not raise when no profiler is initialized."""
        profiler.clean()
        profiler.start("name")

    def test_start(self):
        """start() delegates to the initialized profiler's start()."""
        p = profiler.init("secret", base_id="1", parent_id="2")
        p.start = mock.MagicMock()
        profiler.start("name", info="info")
        p.start.assert_called_once_with("name", info="info")

    def test_stop_not_inited(self):
        """stop() must not raise when no profiler is initialized."""
        profiler.clean()
        profiler.stop()

    def test_stop(self):
        """stop() delegates to the initialized profiler's stop()."""
        p = profiler.init("secret", base_id="1", parent_id="2")
        p.stop = mock.MagicMock()
        profiler.stop(info="info")
        p.stop.assert_called_once_with(info="info")
class ProfilerTestCase(test.TestCase):
    """Tests for the _Profiler class: id handling and notify payloads."""

    def test_profiler_get_shorten_id(self):
        """A UUID string is shortened to its last 64 bits as hex."""
        uuid_id = "4e3e0ec6-2938-40b1-8504-09eb1d4b0dee"
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        result = prof.get_shorten_id(uuid_id)
        expected = "850409eb1d4b0dee"
        self.assertEqual(expected, result)

    def test_profiler_get_shorten_id_int(self):
        """An integer id is rendered as lowercase hex."""
        short_id_int = 42
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        result = prof.get_shorten_id(short_id_int)
        expected = "2a"
        self.assertEqual(expected, result)

    def test_profiler_get_base_id(self):
        """An explicitly supplied base_id is returned unchanged."""
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        self.assertEqual(prof.get_base_id(), "1")

    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    def test_profiler_get_parent_id(self, mock_generate_uuid):
        """parent_id survives a start() (which generates a new trace id)."""
        mock_generate_uuid.return_value = "42"
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        prof.start("test")
        self.assertEqual(prof.get_parent_id(), "2")

    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    def test_profiler_get_base_id_unset_case(self, mock_generate_uuid):
        """Without explicit ids both base and parent ids are generated."""
        mock_generate_uuid.return_value = "42"
        prof = profiler._Profiler("secret")
        self.assertEqual(prof.get_base_id(), "42")
        self.assertEqual(prof.get_parent_id(), "42")

    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    def test_profiler_get_id(self, mock_generate_uuid):
        """After start() the current trace id is the freshly generated uuid."""
        mock_generate_uuid.return_value = "43"
        prof = profiler._Profiler("secret")
        prof.start("test")
        self.assertEqual(prof.get_id(), "43")

    @mock.patch("osprofiler.profiler.datetime")
    @mock.patch("osprofiler.profiler.uuidutils.generate_uuid")
    @mock.patch("osprofiler.profiler.notifier.notify")
    def test_profiler_start(self, mock_notify, mock_generate_uuid,
                            mock_datetime):
        """start() notifies once with the full '-start' payload."""
        mock_generate_uuid.return_value = "44"
        now = datetime.datetime.utcnow()
        mock_datetime.datetime.utcnow.return_value = now
        info = {"some": "info"}
        payload = {
            "name": "test-start",
            "base_id": "1",
            "parent_id": "2",
            "trace_id": "44",
            "info": info,
            "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
        }
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        prof.start("test", info=info)
        mock_notify.assert_called_once_with(payload)

    @mock.patch("osprofiler.profiler.datetime")
    @mock.patch("osprofiler.profiler.notifier.notify")
    def test_profiler_stop(self, mock_notify, mock_datetime):
        """stop() pops the trace stack and notifies with the '-stop' payload."""
        now = datetime.datetime.utcnow()
        mock_datetime.datetime.utcnow.return_value = now
        prof = profiler._Profiler("secret", base_id="1", parent_id="2")
        # Simulate a previously-started trace frame.
        prof._trace_stack.append("44")
        prof._name.append("abc")
        info = {"some": "info"}
        prof.stop(info=info)
        payload = {
            "name": "abc-stop",
            "base_id": "1",
            "parent_id": "2",
            "trace_id": "44",
            "info": info,
            "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"),
        }
        mock_notify.assert_called_once_with(payload)
        self.assertEqual(len(prof._name), 0)
        # Only the initial base/parent ids remain on the stack.
        self.assertEqual(prof._trace_stack, collections.deque(["1", "2"]))

    def test_profiler_hmac(self):
        """The HMAC key passed to the constructor is stored verbatim."""
        hmac = "secret"
        prof = profiler._Profiler(hmac, base_id="1", parent_id="2")
        self.assertEqual(hmac, prof.hmac_key)
class WithTraceTestCase(test.TestCase):
    """Tests for the Trace context manager (including nesting and errors)."""

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_trace(self, mock_start, mock_stop):
        """Nested Trace blocks call start on entry and stop on exit."""
        with profiler.Trace("a", info="a1"):
            mock_start.assert_called_once_with("a", info="a1")
            mock_start.reset_mock()
            with profiler.Trace("b", info="b1"):
                mock_start.assert_called_once_with("b", info="b1")
            # Inner block stopped exactly once.
            mock_stop.assert_called_once_with()
            mock_stop.reset_mock()
        # Outer block stopped exactly once after exit.
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_trace_etype(self, mock_start, mock_stop):
        """An exception inside Trace is reported via stop(info=...) and re-raised."""
        def foo():
            with profiler.Trace("foo"):
                raise ValueError("bar")
        self.assertRaises(ValueError, foo)
        mock_start.assert_called_once_with("foo", info=None)
        mock_stop.assert_called_once_with(info={
            "etype": "ValueError",
            "message": "bar"
        })
# Module-level fixture: traced identity function used by TraceDecoratorTestCase.
@profiler.trace("function", info={"info": "some_info"})
def traced_func(i):
    """Return *i* unchanged; the decorator reports the call with args."""
    return i
# Fixture: traced with hide_args=True, so call arguments are not reported.
@profiler.trace("hide_args", hide_args=True)
def trace_hide_args_func(a, i=10):
    """Return the (a, i) tuple unchanged."""
    return (a, i)
# Fixture: traced function that always raises, to test exception reporting.
@profiler.trace("foo", hide_args=True)
def test_fn_exc():
    """Raise a bare ValueError (empty message)."""
    raise ValueError()
# Fixture: traced with hide_result=False, so the return value is reported.
@profiler.trace("hide_result", hide_result=False)
def trace_with_result_func(a, i=10):
    """Return the (a, i) tuple unchanged."""
    return (a, i)
class TraceDecoratorTestCase(test.TestCase):
    """Tests for the @profiler.trace function decorator."""

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_duplicate_trace_disallow(self, mock_start, mock_stop):
        """Re-decorating a traced function raises unless multiple tracing is allowed."""
        @profiler.trace("test")
        def trace_me():
            pass
        self.assertRaises(
            ValueError,
            profiler.trace("test-again", allow_multiple_trace=False),
            trace_me)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_args(self, mock_start, mock_stop):
        """By default the call args/kwargs are included in the start info."""
        self.assertEqual(1, traced_func(1))
        expected_info = {
            "info": "some_info",
            "function": {
                "name": "osprofiler.tests.unit.test_profiler.traced_func",
                "args": str((1,)),
                "kwargs": str({})
            }
        }
        mock_start.assert_called_once_with("function", info=expected_info)
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_args(self, mock_start, mock_stop):
        """With hide_args=True only the function name is reported."""
        self.assertEqual((1, 2), trace_hide_args_func(1, i=2))
        expected_info = {
            "function": {
                "name": "osprofiler.tests.unit.test_profiler"
                        ".trace_hide_args_func"
            }
        }
        mock_start.assert_called_once_with("hide_args", info=expected_info)
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_exception(self, mock_start, mock_stop):
        """An exception in the traced function is reported via stop(info=...)."""
        self.assertRaises(ValueError, test_fn_exc)
        expected_info = {
            "function": {
                "name": "osprofiler.tests.unit.test_profiler.test_fn_exc"
            }
        }
        expected_stop_info = {"etype": "ValueError", "message": ""}
        mock_start.assert_called_once_with("foo", info=expected_info)
        mock_stop.assert_called_once_with(info=expected_stop_info)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_with_result(self, mock_start, mock_stop):
        """With hide_result=False the return value is included in the stop info."""
        self.assertEqual((1, 2), trace_with_result_func(1, i=2))
        start_info = {
            "function": {
                "name": "osprofiler.tests.unit.test_profiler"
                        ".trace_with_result_func",
                "args": str((1,)),
                "kwargs": str({"i": 2})
            }
        }
        stop_info = {
            "function": {
                "result": str((1, 2))
            }
        }
        mock_start.assert_called_once_with("hide_result", info=start_info)
        mock_stop.assert_called_once_with(info=stop_info)
class FakeTracedCls(object):
    """Plain helper class whose methods get wrapped by trace_cls in tests."""

    def method1(self, a, b, c=10):
        """Return the sum a + b + c."""
        total = a + b
        return total + c

    def method2(self, d, e):
        """Return the difference d - e."""
        difference = d - e
        return difference

    def method3(self, g=10, h=20):
        """Return the product g * h."""
        product = g * h
        return product

    def _method(self, i):
        """Identity helper; private, so not traced by default."""
        return i
# Fixture: public methods traced under "rpc" with extra info attached.
@profiler.trace_cls("rpc", info={"a": 10})
class FakeTraceClassWithInfo(FakeTracedCls):
    """FakeTracedCls traced with info={'a': 10}."""
    pass
# Fixture: traced with hide_args=True, so call arguments are not reported.
@profiler.trace_cls("a", info={"b": 20}, hide_args=True)
class FakeTraceClassHideArgs(FakeTracedCls):
    """FakeTracedCls traced with hidden arguments."""
    pass
# Fixture: trace_private=True also wraps underscore-prefixed methods.
@profiler.trace_cls("rpc", trace_private=True)
class FakeTracePrivate(FakeTracedCls):
    """FakeTracedCls with private methods traced too."""
    pass
class FakeTraceStaticMethodBase(FakeTracedCls):
    """Adds a staticmethod so staticmethod tracing can be exercised."""

    @staticmethod
    def static_method(arg):
        """Return *arg* unchanged."""
        return arg
# Fixture: opts in to static-method tracing (feature currently disabled; see
# the skipped test_static below).
@profiler.trace_cls("rpc", trace_static_methods=True)
class FakeTraceStaticMethod(FakeTraceStaticMethodBase):
    """Base class traced with trace_static_methods=True."""
    pass
# Fixture: default trace_cls, which must NOT trace static methods.
@profiler.trace_cls("rpc")
class FakeTraceStaticMethodSkip(FakeTraceStaticMethodBase):
    """Base class traced with default options (statics skipped)."""
    pass
class FakeTraceClassMethodBase(FakeTracedCls):
    """Adds a classmethod so classmethod (non-)tracing can be exercised."""

    @classmethod
    def class_method(cls, arg):
        """Return *arg* unchanged."""
        return arg
# Fixture: default trace_cls, which must NOT trace class methods.
@profiler.trace_cls("rpc")
class FakeTraceClassMethodSkip(FakeTraceClassMethodBase):
    """Base class traced with default options (classmethods skipped)."""
    pass
def py3_info(info):
    """Return a deep copy of *info* whose function name has any
    ``FakeTrace...`` class component replaced with ``FakeTracedCls``.

    Works around the Python-2/3 difference in how inherited methods
    report their qualified name.
    """
    # NOTE(boris-42): py33 I hate you.
    normalized = copy.deepcopy(info)
    qualname = normalized["function"]["name"]
    normalized["function"]["name"] = re.sub(
        "FakeTrace[^.]*", "FakeTracedCls", qualname)
    return normalized
def possible_mock_calls(name, info):
    """Return both acceptable mock calls: the exact PY2 form and the
    PY3-normalized form (see py3_info)."""
    # NOTE(boris-42): py33 I hate you.
    py2_call = mock.call(name, info=info)
    py3_call = mock.call(name, info=py3_info(info))
    return [py2_call, py3_call]
class TraceClsDecoratorTestCase(test.TestCase):
    """Tests for the @profiler.trace_cls class decorator."""

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_args(self, mock_start, mock_stop):
        """Positional args (including self) are reported in the start info."""
        fake_cls = FakeTraceClassWithInfo()
        self.assertEqual(30, fake_cls.method1(5, 15))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceClassWithInfo.method1"),
                "args": str((fake_cls, 5, 15)),
                "kwargs": str({})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        # Accept either the PY2 or PY3 qualified method name.
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_kwargs(self, mock_start, mock_stop):
        """Keyword args are reported separately from positional args."""
        fake_cls = FakeTraceClassWithInfo()
        self.assertEqual(50, fake_cls.method3(g=5, h=10))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceClassWithInfo.method3"),
                "args": str((fake_cls,)),
                "kwargs": str({"g": 5, "h": 10})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_private(self, mock_start, mock_stop):
        """Private methods are not traced by default."""
        fake_cls = FakeTraceClassHideArgs()
        self.assertEqual(10, fake_cls._method(10))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_args(self, mock_start, mock_stop):
        """With hide_args=True only the method name is reported."""
        fake_cls = FakeTraceClassHideArgs()
        self.assertEqual(40, fake_cls.method1(5, 15, c=20))
        expected_info = {
            "b": 20,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceClassHideArgs.method1"),
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("a", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_private_methods(self, mock_start, mock_stop):
        """With trace_private=True underscore-prefixed methods are traced."""
        fake_cls = FakeTracePrivate()
        self.assertEqual(5, fake_cls._method(5))
        expected_info = {
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTracePrivate._method"),
                "args": str((fake_cls, 5)),
                "kwargs": str({})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    @test.testcase.skip(
        "Static method tracing was disabled due the bug. This test should be "
        "skipped until we find the way to address it.")
    def test_static(self, mock_start, mock_stop):
        """(Skipped) static methods would be traced with trace_static_methods=True."""
        fake_cls = FakeTraceStaticMethod()
        self.assertEqual(25, fake_cls.static_method(25))
        expected_info = {
            "function": {
                # fixme(boris-42): Static methods are treated differently in
                #                  Python 2.x and Python 3.x. So in PY2 we
                #                  expect to see method4 because method is
                #                  static and doesn't have reference to class
                #                  - and FakeTraceStatic.method4 in PY3
                "name":
                    "osprofiler.tests.unit.test_profiler"
                    ".method4" if six.PY2 else
                    "osprofiler.tests.unit.test_profiler.FakeTraceStatic"
                    ".method4",
                "args": str((25,)),
                "kwargs": str({})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_static_method_skip(self, mock_start, mock_stop):
        """Static methods are not traced with default trace_cls options."""
        self.assertEqual(25, FakeTraceStaticMethodSkip.static_method(25))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_class_method_skip(self, mock_start, mock_stop):
        """Class methods are not traced with default trace_cls options."""
        self.assertEqual("foo", FakeTraceClassMethodSkip.class_method("foo"))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)
@six.add_metaclass(profiler.TracedMeta)
class FakeTraceWithMetaclassBase(object):
    """Fixture class traced through TracedMeta with name "rpc"."""
    __trace_args__ = {"name": "rpc",
                      "info": {"a": 10}}
    def method1(self, a, b, c=10):
        """Return a + b + c."""
        return a + b + c
    def method2(self, d, e):
        """Return d - e."""
        return d - e
    def method3(self, g=10, h=20):
        """Return g * h."""
        return g * h
    def _method(self, i):
        """Return i unchanged; private, so untraced by default."""
        return i
class FakeTraceDummy(FakeTraceWithMetaclassBase):
    """Subclass fixture adding one extra method to the traced base."""
    def method4(self, j):
        """Return j unchanged."""
        return j
class FakeTraceWithMetaclassHideArgs(FakeTraceWithMetaclassBase):
    """Fixture traced with hide_args=True (args/kwargs not recorded)."""
    __trace_args__ = {"name": "a",
                      "info": {"b": 20},
                      "hide_args": True}
    def method5(self, k, l):
        """Return k + l."""
        return k + l
class FakeTraceWithMetaclassPrivate(FakeTraceWithMetaclassBase):
    """Fixture traced with trace_private=True (underscore methods traced)."""
    __trace_args__ = {"name": "rpc",
                      "trace_private": True}
    def _new_private_method(self, m):
        """Return 2 * m."""
        return 2 * m
class TraceWithMetaclassTestCase(test.TestCase):
    """Tests for classes traced through profiler.TracedMeta."""

    def test_no_name_exception(self):
        """A TracedMeta class without __trace_args__ must raise TypeError."""

        def define_class_with_no_name():
            @six.add_metaclass(profiler.TracedMeta)
            class FakeTraceWithMetaclassNoName(FakeTracedCls):
                pass

        # BUG FIX: previously a stray extra argument (1) was passed here, so
        # assertRaises invoked define_class_with_no_name(1) and the TypeError
        # came from the bad call arity instead of exercising the metaclass
        # validation.
        self.assertRaises(TypeError, define_class_with_no_name)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_args(self, mock_start, mock_stop):
        """Positional arguments are recorded in the trace info."""
        fake_cls = FakeTraceWithMetaclassBase()
        self.assertEqual(30, fake_cls.method1(5, 15))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassBase.method1"),
                "args": str((fake_cls, 5, 15)),
                "kwargs": str({})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_kwargs(self, mock_start, mock_stop):
        """Keyword arguments are recorded in the trace info."""
        fake_cls = FakeTraceWithMetaclassBase()
        self.assertEqual(50, fake_cls.method3(g=5, h=10))
        expected_info = {
            "a": 10,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassBase.method3"),
                "args": str((fake_cls,)),
                "kwargs": str({"g": 5, "h": 10})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_private(self, mock_start, mock_stop):
        """Private methods are not traced by default."""
        fake_cls = FakeTraceWithMetaclassHideArgs()
        self.assertEqual(10, fake_cls._method(10))
        self.assertFalse(mock_start.called)
        self.assertFalse(mock_stop.called)

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_without_args(self, mock_start, mock_stop):
        """With hide_args, args/kwargs are omitted from the trace info."""
        fake_cls = FakeTraceWithMetaclassHideArgs()
        self.assertEqual(20, fake_cls.method5(5, 15))
        expected_info = {
            "b": 20,
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassHideArgs.method5")
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("a", expected_info))
        mock_stop.assert_called_once_with()

    @mock.patch("osprofiler.profiler.stop")
    @mock.patch("osprofiler.profiler.start")
    def test_private_methods(self, mock_start, mock_stop):
        """trace_private=True also traces underscore-prefixed methods."""
        fake_cls = FakeTraceWithMetaclassPrivate()
        self.assertEqual(10, fake_cls._new_private_method(5))
        expected_info = {
            "function": {
                "name": ("osprofiler.tests.unit.test_profiler"
                         ".FakeTraceWithMetaclassPrivate._new_private_method"),
                "args": str((fake_cls, 5)),
                "kwargs": str({})
            }
        }
        self.assertEqual(1, len(mock_start.call_args_list))
        self.assertIn(mock_start.call_args_list[0],
                      possible_mock_calls("rpc", expected_info))
        mock_stop.assert_called_once_with()
| |
from __future__ import division, print_function, absolute_import
import unittest, itertools
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from amitgroup.stats import bernoullimm
from sklearn.datasets.samples_generator import make_spd_matrix
# Module-wide RNG with a fixed seed so test results are reproducible.
rng = np.random.RandomState(0)
def test_sample_bernoulli():
    """
    Test sample generation from mixture.sample_bernoulli
    """
    n_features, n_samples = 2, 300
    axis = 1
    mu = np.clip(rng.rand(n_features), .01, .99)
    draws = bernoullimm.sample_bernoulli(mu, n_samples=n_samples)
    # With 300 samples the empirical mean should land near mu.
    assert_true(np.allclose(draws.mean(axis), mu, atol=1.3))
def _naive_lpbpdf(X, mu):
# slow and naive implementation of lpbpdf
ref = np.empty((len(X), len(mu)))
for i, m in enumerate(mu):
ref[:, i] = stats.bernoulli.logpmf(X, m).sum(axis=1)
return ref
def test_lpbpdf():
    """
    Check the vectorized Bernoulli-mixture log-likelihood against the
    slow, naive reference implementation for correctness.
    """
    n_features, n_components, n_samples = 2, 3, 10
    mu = np.clip(rng.rand(n_components, n_features), .01, .99)
    log_inv_mu = np.log(1 - mu)
    log_mu_odds = np.log(mu) - log_inv_mu
    log_inv_mu_sums = log_inv_mu.sum(-1)
    X = (rng.rand(n_samples, n_features) > .5).astype(np.uint8)
    expected = _naive_lpbpdf(X, mu)
    actual = bernoullimm.log_product_of_bernoullis_mixture_likelihood(
        X, log_mu_odds, log_inv_mu_sums)
    assert_array_almost_equal(actual, expected)
def test_BernoulliMM_attributes():
    """Assigned model attributes are stored and read back unchanged."""
    n_components, n_features = 10, 4
    b = bernoullimm.BernoulliMM(n_components, random_state=rng)
    weights = rng.rand(n_components)
    weights = weights / weights.sum()
    means = np.clip(rng.rand(n_components, n_features), .01, .99)
    log_inv_means = np.log(1 - means)
    log_odds = np.log(means) - log_inv_means
    log_inv_mean_sums = log_inv_means.sum(-1)
    assert_true(b.n_components == n_components)
    # Round-trip each attribute through the model.
    for attr, value in (("weights_", weights),
                        ("means_", means),
                        ("log_inv_means_", log_inv_means),
                        ("log_odds_", log_odds),
                        ("log_inv_mean_sums_", log_inv_mean_sums)):
        setattr(b, attr, value)
        assert_array_almost_equal(getattr(b, attr), value)
class BernoulliMMTester():
    """Mix-in with shared tests for Bernoulli mixture model classes.

    NOTE: every test consumes the module-level ``rng`` in sequence, so
    statement order inside each test affects reproducibility.
    """
    # Flag kept for subclasses; not read inside this mix-in.
    do_test_eval = True
    def _setUp(self):
        """Build the random weights/means fixture shared by the tests."""
        self.n_components = 10
        self.n_features = 400
        self.weights = rng.rand(self.n_components)
        self.weights = self.weights / self.weights.sum()
        self.means = np.clip(rng.rand(self.n_components,
                                      self.n_features),
                             .01,.99)
        # Minimum admissible log-likelihood delta between EM iterations.
        self.threshold = -0.5
        self.I = np.eye(self.n_features)
    def test_eval(self):
        """eval() returns per-sample log-likelihoods and responsibilities."""
        b = self.model(n_components=self.n_components,
                       random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        b.means_ = self.means
        b.log_odds_, b.log_inv_mean_sums_ = bernoullimm._compute_log_odds_inv_means_sums(self.means)
        b.weights_ = self.weights
        # Five samples per component, labelled by component index.
        bernoulliidx = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(bernoulliidx)
        # NOTE(review): randn draws standard normals, so the per-feature
        # "on" probability here is Phi(mean) rather than mean itself;
        # presumably rng.rand was intended -- confirm.
        X = (rng.randn(n_samples, self.n_features) <= b.means_[bernoulliidx]).astype(np.uint8)
        ll, responsibilities = b.eval(X)
        self.assertEqual(len(ll), n_samples)
        self.assertEqual(responsibilities.shape,
                         (n_samples, self.n_components))
        # Responsibilities are a distribution over components per sample.
        assert_array_almost_equal(responsibilities.sum(axis=1),
                                  np.ones(n_samples))
        assert_array_equal(responsibilities.argmax(axis=1), bernoulliidx)
    def test_sample(self, n=100):
        """sample() returns an (n, n_features) array of draws."""
        b = self.model(n_components=self.n_components,
                       random_state=rng)
        # Make sure the means are far apart so responsibilities.argmax()
        # picks the actual component used to generate the observations.
        b.means_ = self.means
        b.log_odds_, b.log_inv_mean_sums_ = bernoullimm._compute_log_odds_inv_means_sums(self.means)
        b.weights_ = self.weights
        samples = b.sample(n)
        self.assertEqual(samples.shape, (n, self.n_features))
    def test_train(self, params='wm'):
        """EM training should not decrease the log-likelihood per iteration."""
        b = bernoullimm.BernoulliMM(n_components=self.n_components)
        b.weights_ = self.weights
        b.means_ = self.means
        b.log_odds_, b.log_inv_mean_sums_ = bernoullimm._compute_log_odds_inv_means_sums(self.means)
        # Create a training set by sampling from the predefined distribution.
        X = b.sample(n_samples=100)
        b = self.model(n_components=self.n_components,
                       random_state=rng,
                       n_iter=1, init_params=params)
        b.fit(X)
        # Do one training iteration at a time so we can keep track of
        # the log likelihood to make sure that it increases after each
        # iteration.
        trainll = []
        for iter in range(5):
            b.params = params
            b.init_params = ''
            b.fit(X)
            trainll.append(self.score(b, X))
        b.n_iter = 10
        b.init_params = ''
        b.params = params
        b.fit(X)  # finish fitting
        # The smallest per-iteration change must stay above the threshold.
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > self.threshold,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f. The likelihoods are %s."
            % (delta_min, self.threshold, trainll))
    def test_train_degenerate(self, params='wm'):
        """ Train on degenerate data with 0 in some dimensions
        """
        # Create a training set by sampling from the predefined distribution.
        X = (rng.rand(100, self.n_features) > .5).astype(np.uint8)
        # Zero out every feature except the first one.
        X.T[1:] = 0
        b = self.model(n_components=2,
                       random_state=rng, n_iter=5,
                       init_params=params)
        b.fit(X)
        trainll = b.score(X)
        self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
    def test_train_1d(self, params='wm'):
        """ Train on 1-D data
        """
        # Create a training set by sampling from the predefined distribution.
        X = (rng.rand(100, 1) > .5).astype(np.uint8)
        #X.T[1:] = 0
        b = self.model(n_components=2,
                       random_state=rng, n_iter=5,
                       init_params=params)
        b.fit(X)
        trainll = b.score(X)
        self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
    def score(self, b, X):
        """Total log-likelihood of X under model b."""
        return b.score(X).sum()
class TestBernoulliMMW(unittest.TestCase, BernoulliMMTester):
    """Concrete test case binding the shared mix-in to BernoulliMM."""
    model = bernoullimm.BernoulliMM
    # Reuse the mix-in fixture builder as unittest's setUp hook.
    setUp = BernoulliMMTester._setUp
def test_multiple_init():
    """Test that multiple inits performs at least as well as a single one"""
    X = (rng.rand(30, 5) > .5).astype(np.uint8)
    model = bernoullimm.BernoulliMM(n_components=2,
                                    random_state=rng, n_iter=5)
    single_score = model.fit(X).score(X).sum()
    model.n_init = 5
    refit = model.fit(X)
    print(refit.means_)
    multi_score = refit.score(X).sum()
    print("train2 = {0}, train1 = {1}".format(multi_score, single_score))
    # The best of five inits should not be meaningfully worse than one.
    assert_true(multi_score >= single_score - 1.e-2)
def test_n_parameters():
    """Test that the right number of parameters is estimated"""
    n_samples, n_dim, n_components = 7, 5, 2
    X = (rng.rand(n_samples, n_dim) > .5).astype(np.uint8)
    # One mean per feature per component, plus the free mixture weights.
    expected = n_dim * n_components + n_components - 1
    model = bernoullimm.BernoulliMM(n_components=n_components,
                                    random_state=rng, n_iter=1)
    model.fit(X)
    assert_true(model._n_parameters() == expected)
# def test_aic():
# """ Test the aic and bic criteria"""
# n_samples, n_dim, n_components = 50, 3, 2
# X = (rng.randn(n_samples, n_dim) > .5).astype(np.uint8)
# SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
# b = bernoullimm.BernoulliMM(n_components=n_components,
# random_state=rng)
# b.fit(X)
# aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
# bic = (2 * n_samples * SGH * n_dim +
# np.log(n_samples) * g._n_parameters())
# bound = n_dim * 3. / np.sqrt(n_samples)
# assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
# assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
if __name__ == '__main__':
    # Run this module's tests through nose when executed directly.
    import nose
    nose.runmodule()
| |
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QHBoxLayout, QRadioButton, QButtonGroup, QLabel, QLineEdit, QFormLayout, QWidget, QMessageBox
from PyQt5.QtGui import QIcon, QPainter, QColor, QPen, QImage, QPalette, QBrush, QPixmap
from PyQt5.QtCore import QSize, Qt, QThread, pyqtSignal, pyqtSlot
from openpyxl import Workbook
from openpyxl.styles import Font
from openpyxl import load_workbook
import sys
import time
import os
def trap_exc_during_debug(*args):
    # Print uncaught exception info instead of letting it vanish silently.
    print(args)
# Route all uncaught exceptions through the printer above.
sys.excepthook = trap_exc_during_debug
class workerThread(QThread):
    """One-shot helper thread: sleeps briefly, then emits ``signal`` so the
    connected message-dialog slot runs after the save attempt."""
    # Payload is always 'Done'; connected slots ignore it.
    signal = pyqtSignal(str)
    def __init__(self):
        super().__init__()
        self.abort = False
    @pyqtSlot()
    def run(self):
        #print("yeah yeah ")
        time.sleep(0.1)
        # NOTE(review): processEvents() runs for the *calling* thread, i.e.
        # this worker, not the GUI thread -- confirm whether it is needed.
        # It also depends on the module global ``app`` existing.
        app.processEvents()
        self.signal.emit('Done')
    def __del__(self):
        #print("okay okay")
        # Best-effort shutdown: flag abort and block until the thread ends.
        self.abort = True
        self.wait()
class General(QWidget):
    """Student-registration form that appends one row per save to an Excel
    workbook stored under ``SAVE_DIR``."""

    # Single definition of the output directory (was repeated as a literal).
    SAVE_DIR = r"C:\Management_sys_excels"

    def __init__(self):
        super().__init__()
        self.initUI()

    def _make_label(self, text, width, font, min_height=30):
        """Build a fixed-width form label; min_height=None skips the call."""
        label = QLabel(text)
        label.setFont(font)
        if min_height is not None:
            label.setMinimumHeight(min_height)
        label.setFixedWidth(width)
        return label

    def _make_edit(self, width=600, font=None):
        """Build a QLineEdit with the form's standard sizing and font."""
        edit = QLineEdit()
        edit.setFixedWidth(width)
        edit.setMinimumHeight(30)
        edit.setFont(font if font is not None else QtGui.QFont("Times", 11))
        return edit

    def initUI(self):
        """Create and lay out every widget of the registration form."""
        saveButton = QPushButton("SAVE", self)
        clearButton = QPushButton("CLEAR", self)
        saveButton.setFont(QtGui.QFont("Calibri", 13))
        clearButton.setFont(QtGui.QFont("Calibri", 13))
        saveButton.move(100, 680)
        clearButton.move(260, 680)
        # Year of passing: 2050 down to 2000.
        comboBoxyopass = QtWidgets.QComboBox(self)
        comboBoxyopass.addItem("SELECT")
        for year in range(2050, 1999, -1):
            comboBoxyopass.addItem(str(year))
        comboBoxyopass.setMinimumHeight(35)
        comboBoxyopass.setFixedWidth(150)
        comboBoxyopass.setFont(QtGui.QFont("Calibri", 14))
        # Training centers. Trailing spaces are kept verbatim: the selected
        # text is written to the spreadsheet, so changing them would alter
        # saved data.
        comboBoxtrcentr = QtWidgets.QComboBox(self)
        comboBoxtrcentr.addItem("SELECT CENTER")
        for center in ('Jaipur ', 'Hyderabad', 'Raipur ', 'Lucknow ',
                       'Pune ', 'Vizag ', 'Bhopal ', 'Delhi '):
            comboBoxtrcentr.addItem(center)
        comboBoxtrcentr.setMinimumHeight(35)
        comboBoxtrcentr.setFixedWidth(180)
        comboBoxtrcentr.setFont(QtGui.QFont("Calibri", 14))
        comboBoxcourse = QtWidgets.QComboBox(self)
        comboBoxcourse.addItem("SELECT COURSE")
        for course in ('ESR (30 Days) ', 'ESR (45 Days) ',
                       'Matlab (30 Days) ', 'IOT (15 Days) ',
                       'IOT (30 Days) ', 'JAVA (30 Days) ',
                       'Python (30 Days) ', 'PLC-SCADA (30 Days)',
                       'C/C++ (45 Days) ', 'Android (15 Days) ',
                       'Android (30 Days) '):
            comboBoxcourse.addItem(course)
        comboBoxcourse.setMinimumHeight(35)
        comboBoxcourse.setFixedWidth(200)
        comboBoxcourse.setFont(QtGui.QFont("Calibri", 14))
        # Semester: 1..8 plus "Passed Out".
        comboBoxsem = QtWidgets.QComboBox(self)
        comboBoxsem.addItem("SELECT")
        for sem in range(1, 9):
            comboBoxsem.addItem(str(sem))
        comboBoxsem.addItem("Passed Out")
        comboBoxsem.setMinimumHeight(35)
        comboBoxsem.setFixedWidth(100)
        comboBoxsem.setFont(QtGui.QFont("Calibri", 14))
        comboBoxstate = QtWidgets.QComboBox(self)
        comboBoxstate.addItem("SELECT")
        for state in ('Andhra Pradesh', 'Arunachal Pradesh', 'Assam',
                      'Bihar', 'Goa', 'Gujarat', 'Haryana',
                      'Himachal Pradesh', 'Jammu & Kashmir', 'Karnataka',
                      'Kerala', 'Madhya Pradesh', 'Maharashtra', 'Manipur',
                      'Meghalaya', 'Mizoram', 'Nagaland', 'Orissa',
                      'Punjab', 'Rajasthan', 'Sikkim', 'Tamil Nadu',
                      'Tripura', 'Uttar Pradesh', 'West Bengal',
                      'Chhattisgarh', 'Uttarakhand', 'Jharkhand',
                      'Telangana'):
            comboBoxstate.addItem(state)
        comboBoxstate.setMinimumHeight(35)
        comboBoxstate.setFixedWidth(250)
        comboBoxstate.setFont(QtGui.QFont("Calibri", 14))
        # Gender radio buttons, grouped so only one can be checked.
        hboxsex = QHBoxLayout()
        hboxsex.setSpacing(60)
        # NOTE(review): QFont point sizes are ints in Qt; 10.5 relies on the
        # binding accepting a float -- confirm on the deployed PyQt5 build.
        r1 = QRadioButton("Male")
        r1.setFont(QtGui.QFont("Calibri", 10.5, QtGui.QFont.Bold))
        r1.setMinimumHeight(30)
        r2 = QRadioButton("Female")
        r2.setFont(QtGui.QFont("Calibri", 10.5, QtGui.QFont.Bold))
        r2.setMinimumHeight(30)
        widgetsex = QWidget(self)
        groupsex = QButtonGroup(widgetsex)
        groupsex.addButton(r1)
        groupsex.addButton(r2)
        hboxsex.addWidget(r1)
        hboxsex.addWidget(r2)
        hboxsex.addStretch()
        headerfont = QtGui.QFont("Cambria", 13, QtGui.QFont.Bold)
        saveloc = str("Student_List.xlsx")
        # Labels and line edits for the plain-text fields.
        l1 = self._make_label("Name: ", 180, headerfont)
        text1 = self._make_edit()
        l2 = self._make_label("Email Id: ", 180, headerfont)
        text2 = self._make_edit()
        l3 = self._make_label("Contact No.: ", 180, headerfont)
        text3 = self._make_edit()
        l4 = self._make_label("City: ", 180, headerfont)
        text4 = self._make_edit()
        l5 = self._make_label("State: ", 180, headerfont)
        l6 = self._make_label("College: ", 180, headerfont)
        text6 = self._make_edit()
        l7 = self._make_label("Branch: ", 180, headerfont)
        text7 = self._make_edit()
        l8 = self._make_label("Semester: ", 180, headerfont)
        # l9 and l13 originally had no minimum height; preserved.
        l9 = self._make_label("Year Of Passing: ", 180, headerfont,
                              min_height=None)
        l10 = self._make_label("Course: ", 180, headerfont)
        l11 = self._make_label("Batch: ", 180, headerfont)
        text11 = self._make_edit()
        l12 = self._make_label("Training Center: ", 180, headerfont)
        l13 = self._make_label("SEX: ", 180, headerfont, min_height=None)
        l14 = self._make_label("Save File As: ", 180, headerfont)
        text14 = self._make_edit(
            font=QtGui.QFont("Times", 11, QtGui.QFont.Bold))
        text14.setText(saveloc)
        l15 = self._make_label("Query/Regarding What: ", 200,
                               QtGui.QFont("Cambria", 12, QtGui.QFont.Bold))
        text15 = self._make_edit()
        # Course row: combo box plus an "Others" free-text fallback.
        hboxcourse = QHBoxLayout()
        hboxcourse.setSpacing(25)
        l16 = self._make_label("Others: ", 100, headerfont)
        text16 = self._make_edit(width=250)
        hboxcourse.addWidget(comboBoxcourse)
        hboxcourse.addWidget(l16)
        hboxcourse.addWidget(text16)
        hboxcourse.addStretch()
        # State row: combo box plus an "Others" free-text fallback.
        hboxstate = QHBoxLayout()
        hboxstate.setSpacing(25)
        l17 = self._make_label("Others: ", 70, headerfont)
        text17 = self._make_edit(width=230)
        hboxstate.addWidget(comboBoxstate)
        hboxstate.addWidget(l17)
        hboxstate.addWidget(text17)
        hboxstate.addStretch()
        fbox = QFormLayout()
        fbox.setVerticalSpacing(10)
        fbox.addRow(l1, text1)
        fbox.addRow(l2, text2)
        fbox.addRow(l3, text3)
        fbox.addRow(l4, text4)
        fbox.addRow(l5, hboxstate)
        fbox.addRow(l6, text6)
        fbox.addRow(l7, text7)
        fbox.addRow(l8, comboBoxsem)
        fbox.addRow(l9, comboBoxyopass)
        fbox.addRow(l10, hboxcourse)
        fbox.addRow(l11, text11)
        fbox.addRow(l12, comboBoxtrcentr)
        l18 = self._make_label("Training Session: ", 200, headerfont)
        # Training-session radio buttons, mutually exclusive via a group.
        hboxperiod = QHBoxLayout()
        hboxperiod.setSpacing(70)
        session_buttons = []
        for caption in ("Summer Training", "Winter Training",
                        "Project Based", "Other"):
            btn = QRadioButton(caption)
            btn.setFont(QtGui.QFont("Calibri", 10, QtGui.QFont.Bold))
            btn.setMinimumHeight(30)
            session_buttons.append(btn)
        r3, r4, r5, r6 = session_buttons
        widgetperiod = QWidget(self)
        groupperiod = QButtonGroup(widgetperiod)
        for btn in session_buttons:
            groupperiod.addButton(btn)
            hboxperiod.addWidget(btn)
        hboxperiod.addStretch()
        fbox.addRow(l18, hboxperiod)
        fbox.addRow(l13, hboxsex)
        fbox.addRow(l15, text15)
        fbox.addRow(l14, text14)
        # Widget collections read by the save/clear handlers. saveClicked
        # relies on these exact index positions (tex[0]..tex[10]).
        self.lineedits = [text1, text2, text3, text4, text6, text7, text11,
                          text14, text15, text16, text17]
        self.saveedit = [text14]
        self.comboBox = [comboBoxstate, comboBoxsem, comboBoxyopass,
                         comboBoxtrcentr, comboBoxcourse]
        self.radiobutton = [r1, r2, r3, r4, r5, r6]
        saveButton.clicked.connect(self.saveClicked)
        clearButton.clicked.connect(self.clearClicked)
        self.setLayout(fbox)
        try:
            self.setWindowState(QtCore.Qt.WindowMaximized)
        except Exception:
            # Fall back to a fixed window size if maximizing fails.
            self.setGeometry(10, 30, 1350, 750)
        self.setWindowTitle('Management System Software ')  # typo fixed
        self.setWindowIcon(QIcon('logso.png'))  # Enter your Icon Image url here
        oImage = QImage("image2.jpg")  # Enter your Background Image url here
        sImage = oImage.scaled(QSize(1350, 750))
        palette = QPalette()
        # Role 10 corresponds to QPalette.Window (the background role).
        palette.setBrush(10, QBrush(sImage))
        self.setPalette(palette)
        self.show()

    def validContact(self, phone_number):
        """Return True iff phone_number is exactly ten ASCII digits."""
        return (len(phone_number) == 10
                and all('0' <= ch <= '9' for ch in phone_number))

    def savesuccess(self):
        """Confirm that the row was written to the workbook."""
        savemsg = QMessageBox()
        savemsg.setIcon(QMessageBox.Information)
        savemsg.setText("Your Entries Have Been Saved successfully !")
        savemsg.setWindowTitle("SAVED")
        savemsg.setWindowIcon(QIcon('logso.png'))
        savemsg.exec()

    def errorcontactmsg(self):
        """Warn about an invalid contact number.

        BUG FIX: this dialog previously showed the e-mail error text and
        title, copy-pasted from erroremailmsg.
        """
        errormsg = QMessageBox()
        errormsg.setIcon(QMessageBox.Warning)
        errormsg.setText("Please Enter A Valid Contact No.!!")
        errormsg.setWindowTitle("Invalid Contact No.")
        errormsg.setWindowIcon(QIcon('logso.png'))
        errormsg.exec()

    def erroremailmsg(self):
        """Warn about an invalid e-mail address."""
        erroremsg = QMessageBox()
        erroremsg.setIcon(QMessageBox.Warning)
        erroremsg.setText("Please Enter A Valid Email ID!!")
        erroremsg.setWindowTitle("Invalid Email ID")
        erroremsg.setWindowIcon(QIcon('logso.png'))
        erroremsg.exec()

    def errornormmsg(self):
        """Warn that a mandatory field (name/e-mail/contact) is empty."""
        errormsg = QMessageBox()
        errormsg.setIcon(QMessageBox.Warning)
        errormsg.setText(
            "Please Enter The necessary Fields: Name, Email Id, "
            "Contact No. !!")
        errormsg.setWindowTitle("Error Report")
        errormsg.setWindowIcon(QIcon('logso.png'))
        errormsg.exec()

    def crashingmsg(self):
        """Report that the workbook could not be written (likely open)."""
        crashmsg = QMessageBox()
        crashmsg.setIcon(QMessageBox.Critical)
        crashmsg.setText("The Excel File is Already Open")
        crashmsg.setDetailedText("Please Close the Excel file and Try again")
        crashmsg.setWindowTitle("Program Crashing")
        crashmsg.setWindowIcon(QIcon('logso.png'))
        crashmsg.exec()

    def _notify(self, slot):
        """Show a message dialog via a short-lived worker thread."""
        self.workerthread = workerThread()
        self.workerthread.signal.connect(slot)
        self.workerthread.start()

    def saveClicked(self):
        """Validate the form and append one row to the Excel workbook."""
        try:
            tex = [str(edit.text()) for edit in self.lineedits]
            # Name, e-mail and contact number are mandatory.
            if tex[0] and tex[1] and tex[2]:
                # The original gmail/yahoo substring checks were subsumed by
                # the final "'@' in ..." test; a single check is equivalent.
                if '@' in str(tex[1]):
                    contact = str(tex[2])
                    if self.validContact(contact):
                        try:
                            # Continue an existing sheet: recover the last
                            # serial/token numbers from its final row.
                            wb = load_workbook(
                                os.path.join(self.SAVE_DIR, str(tex[7])))
                            ws = wb.active
                            # NOTE(review): _current_row is private openpyxl
                            # API -- confirm it points at the last written
                            # row on the installed version.
                            row = ws._current_row
                            sno = int(ws['A' + str(row)].internal_value)
                            tkno = int(ws['B' + str(row)].internal_value)
                            row += 1
                        except Exception:
                            # No workbook yet: create one with bold headers.
                            # "REGISTERATION" spelling kept on purpose:
                            # existing workbooks already use this header.
                            wb = Workbook()
                            ws = wb.active
                            ws.title = "Student_List"
                            ft = Font(bold=True)
                            headers = (
                                "S.NO", "TOKEN NO.", "NAME", "EMAIL ID",
                                "CONTACT NO.", "CITY", "STATE", "COLLEGE",
                                "BRANCH", "SEMESTER", "YEAR OF PASSING",
                                "COURSE", "DATE OF REGISTERATION", "BATCH",
                                "TRAINING CENTER", "TRAINING SESSION",
                                "SEX", "QUERY/REGARDING WHAT", "TOTAL FEE",
                                "SUBMITTED FEE", "REMAINING FEE",
                                "DISCOUNT", "REMARKS",
                                "ADDITIONAL REMARKS",
                                "SYSTEM DATE AND TIME")
                            # BUG FIX: the old bolding loop stopped at
                            # column X, leaving the Y1 header un-bolded.
                            for offset, title in enumerate(headers):
                                cell = chr(ord('A') + offset) + '1'
                                ws[cell] = title
                                ws[cell].font = ft
                            sno = 0
                            tkno = 0
                            row = 2
                        ws['A' + str(row)] = int(sno + 1)
                        ws['B' + str(row)] = int(tkno + 1)
                        # Name, e-mail, contact, city -> columns C..F.
                        for j in range(4):
                            ws[chr(67 + j) + str(row)] = str(tex[j])
                        stateopt = str(self.comboBox[0].currentText())
                        if stateopt != "SELECT":
                            ws['G' + str(row)] = stateopt
                        elif tex[10]:
                            # Fall back to the free-text "Others" state.
                            ws['G' + str(row)] = str(tex[10])
                        ws['H' + str(row)] = str(tex[4])
                        ws['I' + str(row)] = str(tex[5])
                        ws['J' + str(row)] = str(
                            self.comboBox[1].currentText())
                        ws['K' + str(row)] = str(
                            self.comboBox[2].currentText())
                        courseopt = str(self.comboBox[4].currentText())
                        if courseopt != "SELECT COURSE":
                            ws['L' + str(row)] = courseopt
                        elif tex[9]:
                            # Fall back to the free-text "Others" course.
                            ws['L' + str(row)] = str(tex[9])
                        ws['N' + str(row)] = str(tex[6])
                        ws['O' + str(row)] = str(
                            self.comboBox[3].currentText())
                        # Training session: first checked of r3..r6.
                        for btn in self.radiobutton[2:6]:
                            if btn.isChecked():
                                ws['P' + str(row)] = str(btn.text())
                                break
                        # Gender: r1 or r2.
                        for btn in self.radiobutton[0:2]:
                            if btn.isChecked():
                                ws['Q' + str(row)] = str(btn.text())
                                break
                        ws['R' + str(row)] = str(tex[8])
                        ws['Y' + str(row)] = str(
                            QtCore.QDateTime.currentDateTime().toString())
                        if not os.path.exists(self.SAVE_DIR):
                            os.makedirs(self.SAVE_DIR)
                        wb.save(os.path.join(self.SAVE_DIR, str(tex[7])))
                        self._notify(self.savesuccess)
                    else:
                        self._notify(self.errorcontactmsg)
                else:
                    self._notify(self.erroremailmsg)
            else:
                self._notify(self.errornormmsg)
        except Exception:
            # Most commonly: the workbook is locked open in Excel.
            self._notify(self.crashingmsg)

    def clearClicked(self, checked=False):
        """Reset every field except the save-file name; reset all combos."""
        for edit in self.lineedits:
            if edit != self.saveedit[0]:
                edit.clear()
        for box in self.comboBox:
            box.setCurrentIndex(0)
        # NOTE(review): radio buttons are left untouched -- confirm whether
        # gender/session should also be reset here.

    def closeEvent(self, event):
        """Ask for confirmation before closing the window."""
        reply = QMessageBox.question(self, 'Message',
                                     "Are you sure to quit?", QMessageBox.Yes |
                                     QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
if __name__ == '__main__':
    # Build the Qt application, show the form, and run the event loop.
    app = QApplication(sys.argv)
    ex = General()
    sys.exit(app.exec_())
| |
#-*- coding: utf-8 -*-
"""
Python CDL parsing library
"""
__version__ = '0.5.5'
__author__ = 'simon'
import sys
import logging
import json
import os
import re
from xml.dom import minidom
import traceback
# Optional dependency: EDL parsing is available only when the third-party
# ``edl`` package can be imported; CDL.edls_enabled() reports this flag.
allow_edls = False
try:
    import edl
    allow_edls = True
except Exception as e:
    allow_edls = False
class CDL(object):
    """ The CDL

    Holds :class:`.ColorDecision` instances. It can be indexed to reach each
    of the :class: `.ColorDecision` s like::

      >>> cdl = CDL(flavour=CDL.XML_CDL)
      >>> cdl.append(ColorDecision(color_decision_node))
      >>> cdl[0]

    :param str flavour: Optional cdl flavour definition defaults to XML,
      other flavours to be added later. `flavour` can be skipped.
    """
    XML_CDL = 'xml_cdl'
    EDL_CDL = 'edl_cdl'
    COLOR_DECISION = 'COLOR_DECISION'
    COLOR_CORRECTION = 'COLOR_CORRECTION'

    def __init__(self, flavour=XML_CDL, filename=None, timebase="24"):
        """Create an empty container; if *filename* is given, load it now."""
        self._edls_enabled = allow_edls
        self._timebase = timebase
        self._flavour = flavour
        self._cdl_string = None
        self._color_items = []
        self._item_type = CDL.COLOR_DECISION
        self._filename = None
        if filename:
            self._filename = os.path.basename(filename)
            self.load(filename)

    def __getitem__(self, item):
        """ returns ColorDecision by index
        """
        return self._color_items[item]

    def __len__(self):
        """
        gets length of color decisions for iterating over color decisions
        """
        return len(self._color_items)

    def edls_enabled(self):
        """True when the optional ``edl`` package was importable."""
        return self._edls_enabled

    def append(self, color_item):
        """Append a color decision/correction to the container."""
        self._color_items.append(color_item)

    def load(self, filename):
        """
        Loads CDL from filename, passes to the read function for loading
        """
        with open(filename) as filehandle:
            self.read(filehandle)

    def read(self, filehandle):
        """
        Reads CDL from Filehandle, passed to loads for processing
        """
        cdl_string = filehandle.read()
        self.loads(cdl_string)

    def loads(self, cdl_string):
        """
        Loads CDL from string, depending on the flavour definition it passes
        to the processing function
        """
        if cdl_string and len(cdl_string) > 0:
            self._cdl_string = cdl_string
            if self._flavour == CDL.XML_CDL:
                self.parse_xml(cdl_string)
            elif self._flavour == CDL.EDL_CDL:
                self.parse_edl(cdl_string)
            else:
                raise Exception("Invalid CDL type specified")
        else:
            raise Exception("Empty CDL string")

    def parse_edl(self, cdl_string):
        """Parse an EDL-flavoured CDL (needs the optional edl package)."""
        if self._edls_enabled:
            # BUG FIX: previously assigned to ``self.item_type`` (a new,
            # unused attribute), leaving ``self._item_type`` -- the value
            # returned by get_item_type() -- stale after EDL parsing.
            self._item_type = CDL.COLOR_CORRECTION
            parser = edl.Parser(self._timebase)
            edl_list = parser.parse(cdl_string)
            for event in edl_list:
                color_correction = ColorCorrection(
                    cdl_edl_strings=event.get_comments(),
                    source_file=self._filename)
                self._color_items.append(color_correction)

    def get_dom(self, cdl_string):
        """Return a minidom document parsed from *cdl_string*."""
        return minidom.parseString(cdl_string)

    def parse_xml(self, cdl_string):
        """
        Parse CDL XML string into ColorDecision objects
        """
        xmldoc = self.get_dom(cdl_string)
        if len(xmldoc.getElementsByTagName('ColorDecision')) > 0:
            self._item_type = CDL.COLOR_DECISION
            for node in xmldoc.getElementsByTagName('ColorDecision'):
                color_decision = ColorDecision(node,
                                               source_file=self._filename)
                self._color_items.append(color_decision)
        elif len(xmldoc.getElementsByTagName('ColorCorrection')) > 0:
            # Bare ColorCorrection nodes without enclosing decisions.
            self._item_type = CDL.COLOR_CORRECTION
            for node in xmldoc.getElementsByTagName('ColorCorrection'):
                color_correction = ColorCorrection(
                    node, source_file=self._filename)
                self._color_items.append(color_correction)
        else:
            raise Exception("No color decisions found")

    def first_color_item(self):
        """
        returns the first color item in the list, simple function,
        throws exception if no color item
        """
        if len(self._color_items) > 0:
            return self._color_items[0]
        else:
            raise Exception("No Color Item Available")

    def __repr__(self):
        """
        returns string representation of CDL
        """
        rep_list = []
        for color_item in self._color_items:
            rep_list.append(str(color_item))
        return "\n".join(rep_list)

    def get_item_type(self):
        """Return COLOR_DECISION or COLOR_CORRECTION for the parsed items."""
        return self._item_type

    def get_color_items(self):
        """Return the list of parsed color items."""
        return self._color_items
class ColorDecision(object):
    """
    A single color decision; may contain multiple color corrections.
    """
    def __init__(self, color_decision_node=None, decision_id=None, source_file=None):
        """
        Optionally initialise from a ColorDecision DOM node.
        """
        self._decision_tree = color_decision_node
        self._corrections = []
        self._decision_id = decision_id
        self._source_file = source_file
        if color_decision_node:
            self.load_dom(self._decision_tree)
    def __getitem__(self, item):
        """
        Index into the contained color corrections.
        """
        return self._corrections[item]
    def __len__(self):
        """
        Number of color corrections held.
        """
        return len(self._corrections)
    def append(self, color_correction):
        """Add one color correction to this decision."""
        self._corrections.append(color_correction)
    def load_dom(self, decision_dom):
        """
        Populate this decision from the given DOM node.
        """
        try:
            # NOTE(review): _get_id() is internal minidom API -- confirm it
            # is available on the targeted Python versions; failures are
            # swallowed and merely logged at debug level.
            self._decision_id = decision_dom._get_id()
            logging.log(logging.DEBUG,
                        "id for color decision set to " + str(self._decision_id))
        except Exception as exception:
            logging.log(logging.DEBUG,
                        "No id attribute set for color decision")
            logging.log(logging.DEBUG,
                        exception)
        for node in decision_dom.getElementsByTagName('ColorCorrection'):
            self.append(ColorCorrection(node, source_file=self._source_file))
    def __repr__(self):
        """
        Returns a string representation of the color decision.
        """
        return "\n".join(str(correction) for correction in self._corrections)
    def first_correction(self):
        """
        Return the first available correction; raise when none exist.
        """
        if not self._corrections:
            raise Exception("No Color Correction Available")
        return self._corrections[0]
    def get_corrections(self):
        """Return the list of corrections."""
        return self._corrections
class ColorCorrection(object):
    """
    An individual color correction object.

    Holds the ASC CDL Slope/Offset/Power triplets, the saturation value and
    an optional id. A correction can be populated from a ColorCorrection XML
    DOM node or from the CDL comment strings of an EDL event; otherwise the
    defaults describe the identity transform.
    """
    def __init__(self, color_correction_node=None, cdl_edl_strings=None, source_file=None):
        """
        Initialise the correction, optionally loading data.

        :param color_correction_node: ColorCorrection XML DOM node (optional).
        :param cdl_edl_strings: iterable of EDL comment strings, only used
            when no DOM node is given (optional).
        :param source_file: path of the file this correction came from.
        """
        self._correction_tree = color_correction_node
        # Identity-transform defaults.
        self._slope = (1.0, 1.0, 1.0)
        self._power = (1.0, 1.0, 1.0)
        self._offset = (0.0, 0.0, 0.0)
        self._saturation = 1.0
        self._id = None
        self._source_file = source_file
        # EDL comment matchers: [0] ASC_SOP (slope/offset/power triplets),
        # [1] ASC_SAT (saturation), [2] FROM CLIP NAME (used as the id).
        self._cdl_matchers = [
            r"\*\s*ASC_SOP \((\-*\d\.\d+) (\-*\d\.\d+) (\-*\d\.\d+)\)\((\-*\d\.\d+) (\-*\d\.\d+) (\-*\d\.\d+)\)\((\-*\d\.\d+) (\-*\d\.\d+) (\-*\d\.\d+)\)",
            r"\*\s*ASC_SAT (\-*\d\.\d+)",
            r"\*\s*FROM CLIP NAME\:\s+(\w+)",
        ]
        if color_correction_node:
            self.load_dom(color_correction_node)
        elif cdl_edl_strings:
            for s in cdl_edl_strings:
                self.process_edl_string(s)
    def process_edl_string(self, s):
        """Extract SOP, SAT and clip-name data from one EDL comment line.

        Each pattern is now matched once per line (the original matched
        every pattern twice)."""
        sop_match = re.match(self._cdl_matchers[0], s)
        if sop_match:
            values = [float(group) for group in sop_match.groups()]
            self._slope = tuple(values[0:3])
            self._offset = tuple(values[3:6])
            self._power = tuple(values[6:9])
        sat_match = re.match(self._cdl_matchers[1], s)
        if sat_match:
            self._saturation = float(sat_match.group(1))
        name_match = re.match(self._cdl_matchers[2], s)
        if name_match:
            self._id = name_match.group(1)
    def to_JSON(self):
        """Return a JSON object string with slope/power/offset/saturation/id."""
        struct = {
            "slope": self._slope,
            "power": self._power,
            "offset": self._offset,
            "saturation": self.saturation,
            "id": self._id
        }
        return json.dumps(struct)
    def load_dom(self, correction_dom):
        """
        Load the color correction from a dom node passed to it. Does not throw
        exception but logs parsing errors to the ERROR log output.
        """
        try:
            # Locate all SOP child nodes first so a malformed document fails
            # before any value is assigned.
            sop_node = correction_dom.getElementsByTagName('SOPNode')[0]
            slope_node = sop_node.getElementsByTagName('Slope')[0]
            offset_node = sop_node.getElementsByTagName('Offset')[0]
            power_node = sop_node.getElementsByTagName('Power')[0]
            self.slope = ColorCorrection.get_float_tuple_from_node(slope_node)
            self.offset = ColorCorrection.get_float_tuple_from_node(offset_node)
            self.power = ColorCorrection.get_float_tuple_from_node(power_node)
            sat_node = correction_dom.getElementsByTagName('SatNode')[0]
            saturation_node = sat_node.getElementsByTagName('Saturation')[0]
            self.saturation = ColorCorrection.get_float_from_node(saturation_node)
            self._id = ColorCorrection.get_attribute_value_by_name(correction_dom, "id")
        except Exception as exception:
            logging.log(logging.ERROR, "Unable to process Color Correction ")
            logging.log(logging.ERROR, exception)
            logging.log(logging.ERROR, traceback.format_exc())
    @staticmethod
    def get_attribute_value_by_name(node, attribute_name):
        """Return the value of the named attribute, or None when absent."""
        value = None
        for key in node.attributes.keys():
            if str(key) == str(attribute_name):
                value = node.attributes[key].value
        return value
    @staticmethod
    def get_float_from_node(node):
        """
        Returns the floating point number contained within the node passed to it
        """
        text_parts = [child.data for child in node.childNodes
                      if child.nodeType == child.TEXT_NODE]
        return float(''.join(text_parts))
    @staticmethod
    def get_float_tuple_from_node(node):
        """
        Returns the floating point numbers as a tuple contained within the node
        passed to it
        """
        text_parts = [child.data for child in node.childNodes
                      if child.nodeType == child.TEXT_NODE]
        return tuple(ColorCorrection.get_float_tuple_from_text(''.join(text_parts)))
    @staticmethod
    def get_float_tuple_from_text(text_string):
        """
        Returns the floating point numbers as a tuple contained within the text
        passed to it.

        Fix: splits on any run of whitespace (str.split()) instead of a single
        space, so padded or tab-separated values no longer produce empty
        tokens. Tokens that still fail to parse are logged and left in the
        result unconverted, matching the original behaviour.
        """
        values = text_string.split()
        for i, token in enumerate(values):
            try:
                values[i] = float(token)
            except ValueError as exception:
                logging.log(logging.ERROR, "Error processing numbers for parameter")
                logging.log(logging.ERROR, exception)
        return tuple(values)
    def set_correction_id(self, correction_id):
        """Set the correction id."""
        self._id = correction_id
    def get_correction_id(self):
        """Return the correction id (may be None)."""
        return self._id
    correction_id = property(get_correction_id, set_correction_id)
    @property
    def filename(self):
        # Path this correction was loaded from, if any.
        return self._source_file
    @property
    def slope(self):
        return self._slope
    @slope.setter
    def slope(self, value):
        self._slope = value
    @property
    def power(self):
        return self._power
    @power.setter
    def power(self, value):
        self._power = value
    @property
    def offset(self):
        return self._offset
    @offset.setter
    def offset(self, value):
        self._offset = value
    @property
    def saturation(self):
        return self._saturation
    @saturation.setter
    def saturation(self, value):
        self._saturation = value
    def __repr__(self):
        """
        Returns a string representation of the color correction
        """
        return ','.join((str(self.slope),
                         str(self.power),
                         str(self.offset),
                         str(self.saturation)))
| |
import numpy
import six
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
def _logsumexp(a, xp, axis=None):
    """Numerically stable log-sum-exp of *a* along *axis*."""
    peak = xp.amax(a, axis=axis, keepdims=True)
    total = xp.sum(xp.exp(a - peak),
                   axis=axis, keepdims=True, dtype=a.dtype)
    return xp.squeeze(peak + xp.log(total), axis=axis)
def _softmax(x, xp):
    """Row-wise softmax, stabilised by subtracting each row's maximum."""
    shifted = xp.exp(x - xp.amax(x, axis=1, keepdims=True))
    return shifted / xp.sum(shifted, axis=1, keepdims=True)
def _label_to_path(labels, blank_symbol, xp):
    """Interleave blanks around labels: (l1, l2) -> (b, l1, b, l2, b)."""
    path_length = labels.shape[1] * 2 + 1
    path = xp.full((len(labels), path_length),
                   blank_symbol, dtype=numpy.int32)
    path[:, 1::2] = labels
    return path
def _log_dot(prob, rr, xp):
    # Log-domain matrix-vector product: elementwise add, then log-sum-exp
    # over the last axis (rr is transposed to align the transition axes).
    return _logsumexp(prob + xp.swapaxes(rr, 1, 2), xp, axis=2)
def _activate(yseq, xp):
    # Apply the stabilised softmax to every time slice of the sequence.
    return [_softmax(y, xp) for y in yseq]
class ConnectionistTemporalClassification(function.Function):
    """The implementation of Connectionist Temporal Classification loss functions.
    To make it usable for real-world cases, this class has two policies below.
    1. This class computes forward and backward variables in the log domain.
    2. This class applies the softmax function to inputs. The Backward
    values of CTC loss is often overflows. This is avoided by computing
    backward values before the activation function is applied.
    """
    def __init__(self, blank_symbol):
        # Index of the blank label in each softmax distribution.
        self.blank_symbol = blank_symbol
        # Large negative stand-in for log(0) in the log domain.
        self.zero_padding = -10000000000.0
    def check_type_forward(self, in_types):
        # Inputs are (labels, x_1, ..., x_T): int32 labels, float32 frames,
        # every frame sharing the shape of the first frame.
        type_check.expect(in_types.size() > 1)
        l_type = in_types[0]
        type_check.expect(l_type.dtype == numpy.int32)
        x_basetype = in_types[1]
        for i in six.moves.range(2, len(in_types)):
            x_type = in_types[i]
            type_check.expect(
                x_type.dtype == numpy.float32,
                x_type.shape == x_basetype.shape,
            )
    def log_matrix(self, x, xp):
        # Elementwise log with zeros mapped to self.zero_padding rather
        # than -inf, keeping later arithmetic finite.
        if xp == numpy:
            res = numpy.ma.log(x).filled(fill_value=self.zero_padding)
        else:
            create_recurrence_relation = cuda.cupy.ElementwiseKernel(
                'T x, T e', 'T y',
                'y = x == 0 ? e : log(x)',
                'create_recurrence_relation')
            res = create_recurrence_relation(x, self.zero_padding)
        return res
    def recurrence_relation(self, size, dtype, xp):
        """Transition in forward and backward algorithms is represented as matrix.
        See also
        https://blog.wtf.sg/2014/10/06/connectionist-temporal-classification-ctc-with-theano/
        """
        # Allowed transitions along the blank-interleaved path: stay (k=0),
        # advance one step (k=1), or skip a blank (k=2, only from odd
        # positions — hence the alternating arange % 2 mask).
        rr = (xp.eye(size, dtype=dtype) +
              xp.eye(size, k=1, dtype=dtype) +
              xp.eye(size, k=2, dtype=dtype) *
              (xp.arange(size, dtype=dtype) % dtype(2)))
        return self.log_matrix(rr, xp)
    # path probability to label probability
    def label_probability(self, label_size, path, multiply, xp):
        labels_prob = self.log_matrix(xp.zeros((len(path), label_size),
                                               dtype=multiply.dtype), xp)
        if xp == numpy:
            # Log-sum the path probabilities of every position carrying the
            # same label, batch element by batch element.
            for b in six.moves.range(len(path)):
                chars = {c for c in path[b]}
                for c in chars:
                    labels_prob[b, c] = _logsumexp(
                        multiply[b, path[b] == c], numpy)
        else:
            cuda.cupy.ElementwiseKernel(
                'raw T x, raw I y, I b_max, I c_max',
                'T z',
                '''
                T value = z;
                I c = i % b_max, b = i / b_max;
                int ind[2] = {b, -1};
                for (int index = 0; index < c_max; ++index) {
                    ind[1] = index;
                    if (y[ind] == c) {
                        T xvalue = x[ind];
                        if (value > xvalue) {
                            value = value + log(1 + exp(xvalue - value));
                        } else {
                            value = xvalue + log(1 + exp(value - xvalue));
                        }
                    }
                    z = value;
                }
                ''',
                'reduce_probability')(multiply, path, labels_prob.shape[1],
                                      path.shape[1], labels_prob)
        return labels_prob
    def calc_trans(self, path, yseq, rr, xp):
        # Forward/backward recursions over the blank-interleaved path, both
        # starting from the first path position in the log domain.
        forward_prob = self.log_matrix(
            xp.eye(path.shape[1], dtype='f')[0], xp)[None, :]
        backward_prob = forward_prob
        offset = xp.arange(
            0, yseq[0].size, yseq[0].shape[1], dtype=path.dtype)[:, None]
        # prob[i] := forward[i] + backward[-i-1]
        prob = []
        index = offset + path
        for y in yseq:
            # calc forward probability in log scale
            forward_prob = xp.take(y, index) + _log_dot(
                forward_prob[:, None, :], rr, xp)
            prob.append(forward_prob)
        r_index = offset + path[:, ::-1]
        for i, y_inv in enumerate(yseq[::-1]):
            # calc backward probability
            backward_prob = _log_dot(backward_prob[:, None, :], rr, xp)
            prob[-i - 1] += backward_prob[:, ::-1]
            backward_prob = xp.take(y_inv, r_index) + backward_prob
        return prob
    def forward(self, inputs):
        # inputs[0] holds the labels, inputs[1:] the per-timestep activations.
        xp = cuda.get_array_module(inputs[0])
        batch_size = len(inputs[0])
        self.yseq = _activate(inputs[1::], xp)
        log_yseq = [self.log_matrix(y, xp) for y in self.yseq]
        self.path = _label_to_path(inputs[0], self.blank_symbol, xp)
        rr = self.recurrence_relation(
            self.path.shape[1], numpy.float32, xp)[None, :, :]
        self.prob_trans = self.calc_trans(self.path, log_yseq, rr, xp)
        # Negative mean log-likelihood over the batch.
        loss = utils.force_array(xp.sum(
            _logsumexp(self.prob_trans[-1], xp, axis=1)))
        loss /= -batch_size
        return loss,
    def backward(self, inputs, grad_output):
        xp = cuda.get_array_module(inputs[0])
        batch_size = len(inputs[0])
        total_probability = _logsumexp(self.prob_trans[0], xp, axis=1)
        scale = grad_output[0] / batch_size
        for y, prob in zip(self.yseq, self.prob_trans):
            label_prob = self.label_probability(
                y.shape[1], self.path, prob, xp)
            # Combined softmax+CTC gradient: y - p(label | input), scaled.
            # Updates self.yseq in place.
            y -= xp.exp(label_prob - total_probability[:, None])
            y *= scale
        # No gradient with respect to the label input.
        return (None,) + tuple(self.yseq)
def connectionist_temporal_classification(x, t, blank_symbol):
    """Connectionist Temporal Classification loss function.
    Connectionist Temporal Classification(CTC) [Graves2006]_ is a loss function
    of sequence labeling where the alignment between the inputs and target is
    unknown. See also [Graves2012]_
    Args:
        x (Variable): RNN output at each time.
            (ex. :math:`(y_1, y_2, ..., y_T)`)
        t (Variable): Expected label sequence.
        blank_symbol (int): Index of blank_symbol.
            This value must be non-negative.
    Returns:
        Variable: A variable holding a scalar value of the CTC loss.
    .. note::
       You need to input ``x`` without applying to activation functions(e.g.
       softmax function), because this function applies softmax functions
       to ``x`` before calculating CTC loss to avoid numerical limitations.
       You also need to apply softmax function to forwarded values before you
       decode it.
    .. note::
       This function is differentiable only by ``x``.
    .. note::
       This function supports (batch, sequence, 1-dimensional input)-data.
    .. [Graves2006] Alex Graves, Santiago Fernandez,\
    Faustino Gomez, Jurgen Schmidhuber,\
    `Connectionist Temporal Classification: Labelling Unsegmented\
    Sequence Data with Recurrent Neural Networks\
    <ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf>`_
    .. [Graves2012] Alex Graves,\
    `Supervised Sequence Labelling with Recurrent Neural Networks\
    <http://www.cs.toronto.edu/~graves/preprint.pdf>`_
    """
    if not isinstance(blank_symbol, int):
        raise TypeError('blank_symbol must be non-negative integer.')
    # NOTE(review): these asserts are stripped under ``python -O``; consider
    # explicit raises if invalid blank_symbol values must always be rejected.
    assert blank_symbol >= 0
    assert blank_symbol < x[0].data.shape[1]
    # This implementation only supports 1-dimensional data.
    # TODO(jnishi): Support d(>1)-dimensional inputs.
    assert(len(x[0].data.shape) == 2)
    return ConnectionistTemporalClassification(blank_symbol)(t, *x)
| |
""" Cisco_IOS_XR_ncs1k_mxp_headless_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ncs1k\-mxp\-headless package operational data.
This module contains definitions
for the following management objects\:
headless\-func\-data\: Information related to headless
functionality
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE(review): this class appears to be machine-generated (ydk-gen style)
# from the Cisco-IOS-XR-ncs1k-mxp-headless-oper YANG model -- prefer
# regenerating over hand-editing.
class HeadlessFuncData(object):
    """
    Information related to headless functionality
    .. attribute:: ethernet_port_names
    	Ethernet Statistics collected during last headless operation
    	**type**\: :py:class:`EthernetPortNames <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_headless_oper.HeadlessFuncData.EthernetPortNames>`
    .. attribute:: otn_port_names
    	OTN Statistics collected during last headless operation
    	**type**\: :py:class:`OtnPortNames <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_headless_oper.HeadlessFuncData.OtnPortNames>`
    """
    # YANG module prefix and the model revision this class was generated from.
    _prefix = 'ncs1k-mxp-headless-oper'
    _revision = '2016-09-13'
    def __init__(self):
        # Wire up the child containers and their parent back-references.
        self.ethernet_port_names = HeadlessFuncData.EthernetPortNames()
        self.ethernet_port_names.parent = self
        self.otn_port_names = HeadlessFuncData.OtnPortNames()
        self.otn_port_names.parent = self
    class OtnPortNames(object):
        """
        OTN Statistics collected during last headless
        operation
        .. attribute:: otn_port_name
        	port Name
        	**type**\: list of :py:class:`OtnPortName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_headless_oper.HeadlessFuncData.OtnPortNames.OtnPortName>`
        """
        _prefix = 'ncs1k-mxp-headless-oper'
        _revision = '2016-09-13'
        def __init__(self):
            self.parent = None
            self.otn_port_name = YList()
            self.otn_port_name.parent = self
            self.otn_port_name.name = 'otn_port_name'
        class OtnPortName(object):
            """
            port Name
            .. attribute:: name  <key>
            	Port name
            	**type**\: str
            	**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
            .. attribute:: headless_end_time
            	Headless End Time
            	**type**\: str
            	**length:** 0..64
            .. attribute:: headless_start_time
            	Headless Start Time
            	**type**\: str
            	**length:** 0..64
            .. attribute:: otn_statistics
            	OTN statistics
            	**type**\: :py:class:`OtnStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_headless_oper.HeadlessFuncData.OtnPortNames.OtnPortName.OtnStatistics>`
            .. attribute:: started_stateful
            	Started Stateful
            	**type**\: bool
            """
            _prefix = 'ncs1k-mxp-headless-oper'
            _revision = '2016-09-13'
            def __init__(self):
                self.parent = None
                self.name = None
                self.headless_end_time = None
                self.headless_start_time = None
                self.otn_statistics = HeadlessFuncData.OtnPortNames.OtnPortName.OtnStatistics()
                self.otn_statistics.parent = self
                self.started_stateful = None
            class OtnStatistics(object):
                """
                OTN statistics
                .. attribute:: fec_ec
                	FecEc
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: fec_uc
                	FecUc
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: sm_bei
                	SmBei
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: sm_bip
                	SmBip
                	**type**\: int
                	**range:** 0..18446744073709551615
                """
                _prefix = 'ncs1k-mxp-headless-oper'
                _revision = '2016-09-13'
                def __init__(self):
                    self.parent = None
                    self.fec_ec = None
                    self.fec_uc = None
                    self.sm_bei = None
                    self.sm_bip = None
                @property
                def _common_path(self):
                    # Absolute XPath-style path; requires the parent link set.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path +'/Cisco-IOS-XR-ncs1k-mxp-headless-oper:otn-statistics'
                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False
                def _has_data(self):
                    # True when any statistics leaf has been populated.
                    if self.fec_ec is not None:
                        return True
                    if self.fec_uc is not None:
                        return True
                    if self.sm_bei is not None:
                        return True
                    if self.sm_bip is not None:
                        return True
                    return False
                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_headless_oper as meta
                    return meta._meta_table['HeadlessFuncData.OtnPortNames.OtnPortName.OtnStatistics']['meta_info']
            @property
            def _common_path(self):
                # List entries are addressed by their 'name' key.
                if self.name is None:
                    raise YPYModelError('Key property name is None')
                return '/Cisco-IOS-XR-ncs1k-mxp-headless-oper:headless-func-data/Cisco-IOS-XR-ncs1k-mxp-headless-oper:otn-port-names/Cisco-IOS-XR-ncs1k-mxp-headless-oper:otn-port-name[Cisco-IOS-XR-ncs1k-mxp-headless-oper:name = ' + str(self.name) + ']'
            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False
            def _has_data(self):
                if self.name is not None:
                    return True
                if self.headless_end_time is not None:
                    return True
                if self.headless_start_time is not None:
                    return True
                if self.otn_statistics is not None and self.otn_statistics._has_data():
                    return True
                if self.started_stateful is not None:
                    return True
                return False
            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_headless_oper as meta
                return meta._meta_table['HeadlessFuncData.OtnPortNames.OtnPortName']['meta_info']
        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-ncs1k-mxp-headless-oper:headless-func-data/Cisco-IOS-XR-ncs1k-mxp-headless-oper:otn-port-names'
        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False
        def _has_data(self):
            if self.otn_port_name is not None:
                for child_ref in self.otn_port_name:
                    if child_ref._has_data():
                        return True
            return False
        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_headless_oper as meta
            return meta._meta_table['HeadlessFuncData.OtnPortNames']['meta_info']
    class EthernetPortNames(object):
        """
        Ethernet Statistics collected during last
        headless operation
        .. attribute:: ethernet_port_name
        	Port Name
        	**type**\: list of :py:class:`EthernetPortName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_headless_oper.HeadlessFuncData.EthernetPortNames.EthernetPortName>`
        """
        _prefix = 'ncs1k-mxp-headless-oper'
        _revision = '2016-09-13'
        def __init__(self):
            self.parent = None
            self.ethernet_port_name = YList()
            self.ethernet_port_name.parent = self
            self.ethernet_port_name.name = 'ethernet_port_name'
        class EthernetPortName(object):
            """
            Port Name
            .. attribute:: name  <key>
            	Port name
            	**type**\: str
            	**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
            .. attribute:: ether_statistics
            	Ether Statistics
            	**type**\: :py:class:`EtherStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_headless_oper.HeadlessFuncData.EthernetPortNames.EthernetPortName.EtherStatistics>`
            .. attribute:: headless_end_time
            	Headless End Time
            	**type**\: str
            	**length:** 0..64
            .. attribute:: headless_start_time
            	Headless Start Time
            	**type**\: str
            	**length:** 0..64
            .. attribute:: started_stateful
            	Started Stateful
            	**type**\: bool
            """
            _prefix = 'ncs1k-mxp-headless-oper'
            _revision = '2016-09-13'
            def __init__(self):
                self.parent = None
                self.name = None
                self.ether_statistics = HeadlessFuncData.EthernetPortNames.EthernetPortName.EtherStatistics()
                self.ether_statistics.parent = self
                self.headless_end_time = None
                self.headless_start_time = None
                self.started_stateful = None
            class EtherStatistics(object):
                """
                Ether Statistics
                .. attribute:: rx8021q_pkt
                	Rx8021QPkt
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_bytes_good
                	RxBytesGood
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_error_jabbers
                	RxErrorJabbers
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_lldp_pkt
                	RxLldpPkt
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_packets
                	RxPackets
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pause
                	RxPause
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkt_drop
                	RxPktDrop
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts1024_to1518_bytes
                	RxPkts1024To1518Bytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts128to255_bytes
                	RxPkts128to255Bytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts256_to511_bytes
                	RxPkts256To511Bytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts512_to1023_bytes
                	RxPkts512To1023Bytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts64_bytes
                	RxPkts64Bytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts65_to127_bytes
                	RxPkts65To127Bytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts_bad_fcs
                	RxPktsBadFcs
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts_broadcast
                	RxPktsBroadcast
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts_good
                	RxPktsGood
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts_multicast
                	RxPktsMulticast
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts_over_sized
                	RxPktsOverSized
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts_under_sized
                	RxPktsUnderSized
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_pkts_unicast
                	RxPktsUnicast
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_recv_fragments
                	RxRecvFragments
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: rx_total_bytes
                	RxTotalBytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_bad_fcs
                	TxBadFCS
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_bytes_good
                	TxBytesGood
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_fragments
                	TxFragments
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_jabber
                	TxJabber
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_packets
                	TxPackets
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_pause
                	TxPause
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_pkts_good
                	TxPktsGood
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_pkts_over_sized
                	TxPktsOverSized
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_pkts_under_sized
                	TxPktsUnderSized
                	**type**\: int
                	**range:** 0..18446744073709551615
                .. attribute:: tx_total_bytes
                	TxTotalBytes
                	**type**\: int
                	**range:** 0..18446744073709551615
                """
                _prefix = 'ncs1k-mxp-headless-oper'
                _revision = '2016-09-13'
                def __init__(self):
                    self.parent = None
                    self.rx8021q_pkt = None
                    self.rx_bytes_good = None
                    self.rx_error_jabbers = None
                    self.rx_lldp_pkt = None
                    self.rx_packets = None
                    self.rx_pause = None
                    self.rx_pkt_drop = None
                    self.rx_pkts1024_to1518_bytes = None
                    self.rx_pkts128to255_bytes = None
                    self.rx_pkts256_to511_bytes = None
                    self.rx_pkts512_to1023_bytes = None
                    self.rx_pkts64_bytes = None
                    self.rx_pkts65_to127_bytes = None
                    self.rx_pkts_bad_fcs = None
                    self.rx_pkts_broadcast = None
                    self.rx_pkts_good = None
                    self.rx_pkts_multicast = None
                    self.rx_pkts_over_sized = None
                    self.rx_pkts_under_sized = None
                    self.rx_pkts_unicast = None
                    self.rx_recv_fragments = None
                    self.rx_total_bytes = None
                    self.tx_bad_fcs = None
                    self.tx_bytes_good = None
                    self.tx_fragments = None
                    self.tx_jabber = None
                    self.tx_packets = None
                    self.tx_pause = None
                    self.tx_pkts_good = None
                    self.tx_pkts_over_sized = None
                    self.tx_pkts_under_sized = None
                    self.tx_total_bytes = None
                @property
                def _common_path(self):
                    # Absolute XPath-style path; requires the parent link set.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path +'/Cisco-IOS-XR-ncs1k-mxp-headless-oper:ether-statistics'
                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False
                def _has_data(self):
                    # True when any statistics leaf has been populated.
                    if self.rx8021q_pkt is not None:
                        return True
                    if self.rx_bytes_good is not None:
                        return True
                    if self.rx_error_jabbers is not None:
                        return True
                    if self.rx_lldp_pkt is not None:
                        return True
                    if self.rx_packets is not None:
                        return True
                    if self.rx_pause is not None:
                        return True
                    if self.rx_pkt_drop is not None:
                        return True
                    if self.rx_pkts1024_to1518_bytes is not None:
                        return True
                    if self.rx_pkts128to255_bytes is not None:
                        return True
                    if self.rx_pkts256_to511_bytes is not None:
                        return True
                    if self.rx_pkts512_to1023_bytes is not None:
                        return True
                    if self.rx_pkts64_bytes is not None:
                        return True
                    if self.rx_pkts65_to127_bytes is not None:
                        return True
                    if self.rx_pkts_bad_fcs is not None:
                        return True
                    if self.rx_pkts_broadcast is not None:
                        return True
                    if self.rx_pkts_good is not None:
                        return True
                    if self.rx_pkts_multicast is not None:
                        return True
                    if self.rx_pkts_over_sized is not None:
                        return True
                    if self.rx_pkts_under_sized is not None:
                        return True
                    if self.rx_pkts_unicast is not None:
                        return True
                    if self.rx_recv_fragments is not None:
                        return True
                    if self.rx_total_bytes is not None:
                        return True
                    if self.tx_bad_fcs is not None:
                        return True
                    if self.tx_bytes_good is not None:
                        return True
                    if self.tx_fragments is not None:
                        return True
                    if self.tx_jabber is not None:
                        return True
                    if self.tx_packets is not None:
                        return True
                    if self.tx_pause is not None:
                        return True
                    if self.tx_pkts_good is not None:
                        return True
                    if self.tx_pkts_over_sized is not None:
                        return True
                    if self.tx_pkts_under_sized is not None:
                        return True
                    if self.tx_total_bytes is not None:
                        return True
                    return False
                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_headless_oper as meta
                    return meta._meta_table['HeadlessFuncData.EthernetPortNames.EthernetPortName.EtherStatistics']['meta_info']
            @property
            def _common_path(self):
                # List entries are addressed by their 'name' key.
                if self.name is None:
                    raise YPYModelError('Key property name is None')
                return '/Cisco-IOS-XR-ncs1k-mxp-headless-oper:headless-func-data/Cisco-IOS-XR-ncs1k-mxp-headless-oper:ethernet-port-names/Cisco-IOS-XR-ncs1k-mxp-headless-oper:ethernet-port-name[Cisco-IOS-XR-ncs1k-mxp-headless-oper:name = ' + str(self.name) + ']'
            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False
            def _has_data(self):
                if self.name is not None:
                    return True
                if self.ether_statistics is not None and self.ether_statistics._has_data():
                    return True
                if self.headless_end_time is not None:
                    return True
                if self.headless_start_time is not None:
                    return True
                if self.started_stateful is not None:
                    return True
                return False
            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_headless_oper as meta
                return meta._meta_table['HeadlessFuncData.EthernetPortNames.EthernetPortName']['meta_info']
        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-ncs1k-mxp-headless-oper:headless-func-data/Cisco-IOS-XR-ncs1k-mxp-headless-oper:ethernet-port-names'
        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False
        def _has_data(self):
            if self.ethernet_port_name is not None:
                for child_ref in self.ethernet_port_name:
                    if child_ref._has_data():
                        return True
            return False
        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_headless_oper as meta
            return meta._meta_table['HeadlessFuncData.EthernetPortNames']['meta_info']
    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-ncs1k-mxp-headless-oper:headless-func-data'
    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False
    def _has_data(self):
        if self.ethernet_port_names is not None and self.ethernet_port_names._has_data():
            return True
        if self.otn_port_names is not None and self.otn_port_names._has_data():
            return True
        return False
    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_headless_oper as meta
        return meta._meta_table['HeadlessFuncData']['meta_info']
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 10:26:52 2012
@author: itchy
"""
import numpy as nmp
def spline1d(x_out, x_data, y_data, x_slope, y_slope, *args):
"""
SPLINE1D 1-D interpolation using Green's function for a spline in tension
SPLINE1D will find a spline-based curve using continuous curvature splines
in tension (if set). The algorithm uses the Green's function for the spline.
You can supply data constrains, slope constrains, or a mix of both.
Solution can be evaluated at arbitrary locations
Use one of the following 3 call formats:
y = spline1d (x_out, x_data, y_data, x_slope, y_slope)
y = spline1d (x_out, x_data, y_data, x_slope, y_slope, t)
y = spline1d (x_out, x_data, y_data, x_slope, y_slope, t, cutoff)
The input parameters are:
x_out - Desired output x positions
x_data - coordinates of points with data constraints
y_data - data constraints at the above points
x_slope - coordinates of points with slope constraints
y_slope - slope constraints at the above points
t - tension to use, 0 <= t <= 1
if t is a vector of length 2 the second value is taken as the lengthscale
cutoff - if set, eigenvalues whose ratio to the maximum eigenvalue are smaller
than cutoff are zeroed out before the curve is evaluated.
One of (x_data, y_data) and (x_slope, y_slope) can be ([], [])
t, if not set, defaults to 0 (cubic spline). t = 1 gives linear interpolation
The loutput values are:
y - the interpolation
l - optionally, the eigenvalues of the linear system
See Wessel, P, D. Bercovici, 1998, Gridding with Splines in Tension : A
Green function Approach, Math. Geol., 30, 77-93.
Ported to Python by Richard Styron, June 2012
"""
# Pick a reasonable(?) lengthscale
length_scale = (nmp.amax(x_out) - nmp.amin(x_out)) / 50.
if len(args) == 0: # no tension selected, set default
t = 0.
elif len(args) == 1: # cutoff not set
t = args
cutoff = 0.
elif len(args) == 2:
t = args[0]
cutoff = args[1]
t = nmp.array([t])
cutoff = 0.
nt = len(t)
if nt == 2: # user gave both tension and lengthscale
length_scale = t[1]
t = t[0]
# TODO: Add error/exception for values of t outside of [0,1]
# Misc initializations
if t < 1:
p = nmp.sqrt(t / (1 - t))
p = p / length_scale
n0 = 0
n1 = 1
# First we must enforce the use of column vectors for the data constraints
# FIX THIS: some_vector.shape unpacks to (m,) instead of (m,1)
x_out = nmp.matrix(x_out)
[m,n] = x_out.shape
if m < n:
#x_out = x_out.reshape(x_out.shape[0], -1)
x_out = x_out.T
x_data = nmp.matrix(x_data)
[m,n] = x_data.shape
if m < n:
#x_data = x_data.reshape(x_data.shape[0], -1)
x_data = x_data.T
y_data = nmp.matrix(y_data)
[m,n] = y_data.shape
if m < n:
#y_data = y_data.reshape(y_data.shape[0], -1)
y_data = y_data.T
[n0, m0] = y_data.shape
x_slope = nmp.mat(x_slope)
[m,n] = x_slope.shape
if m < n:
#x_slope = x_slope.reshape(x_slope.shape[0], -1)
x_slope = x_slope.T
y_slope = nmp.mat(y_slope)
[m,n] = y_slope.shape
if m < n:
#y_slope = y_slope.reshape(y_slope.shape[0], -1)
y_slope = y_slope.T
#n1 = len(x_slope)
n1 = 0
# Assembly final xp, yp vectors (possibly combination of data and slopes)
#xp = nmp.array([[x_data] , [x_slope]])
#yp = nmp.array([[y_data] , [y_slope]])
# TODO: fix slope constrain problems by putting an 'if' statement here:
# if slopes exist, add to the vectors
xp = nmp.matrix(x_data)
yp = nmp.matrix(y_data)
# Now build the square n x n linear system that must be solved for the alpha's
n = n0 + n1
A = nmp.zeros((n, n))
for i in nmp.arange(0,n0): # First add equations for data constraints
r = xp[i] - xp
ar = nmp.abs(r)
if t == 0:
B = (ar ** 3)
A[i,:] = B.T
elif t == 1:
B = (ar)
A[i,:] = B.T
else:
B = nmp.exp(nmp.multiply(-p, ar)) + nmp.multiply(p, ar)
A[i,:] = B.T
if n1 > 0:
for i in nmp.arange(0,n1): # Then add equations for slope constraints
j = i + n0
r = xp[j] - xp
ar = nmp.abs(r)
if t == 0:
B = 3.0 * (r * ar)
A[j,:] = B.T
elif t == 1:
B = nmp.sign(r)
A[j,:] = B.T
# Done building square linear system, now solve it
# TODO: fix for cutoff > 0.0 -- deal with nargout for SVD
if cutoff > 0.0: # solve using SVD
# U, S, V = svd(A)
# s = nmp.diag(S)
# if
pass
else:
alpha = nmp.linalg.solve(A, yp)
# Now evaluate final solution at output locations
y = nmp.zeros((len(x_out),m0))
for i in nmp.arange(0,n):
r = xp[i] - x_out
ar = nmp.abs(r)
if t == 0:
y = y + (ar ** 3) * alpha[i,:]
elif t == 1:
y = y + ar * alpha[i,:]
else:
B = nmp.exp(nmp.multiply(-p, ar)) + nmp.multiply(p, ar)
y = y + nmp.multiply(B, alpha[i,:])
y = nmp.array(y.T)
y = y[0,:]
return y
| |
# -*- coding: utf-8 -*-
'''
Management of the windows update agent
======================================
.. versionadded:: 2014.7.0
Set windows updates to run by category. Default behavior is to install
all updates that do not require user interaction to complete.
Optionally set ``category`` to a category of your choice to only
install certain updates. Default is to set to install all available updates.
The following example will install all Security and Critical Updates,
and download but not install standard updates.
.. code-block:: yaml
updates:
win_update.installed:
- categories:
- 'Critical Updates'
- 'Security Updates'
- skips:
- downloaded
win_update.downloaded:
- categories:
- 'Updates'
- skips:
- downloaded
You can also specify a number of features about the update to have a
fine grain approach to specific types of updates. These are the following
features/states of updates available for configuring:
.. code-block:: text
'UI' - User interaction required, skipped by default
'downloaded' - Already downloaded, included by default
'present' - Present on computer, skipped by default
'installed' - Already installed, skipped by default
'reboot' - Reboot required, included by default
'hidden' - Skip updates that have been hidden, skipped by default
'software' - Software updates, included by default
'driver' - driver updates, included by default
The following example installs all driver updates that don't require a reboot:
.. code-block:: yaml
gryffindor:
win_update.installed:
- skips:
- driver: True
- software: False
- reboot: False
To just update your windows machine, add this your sls:
.. code-block:: yaml
updates:
win_update.installed
'''
# Import Python libs
from __future__ import absolute_import
import logging
# Import 3rd-party libs
# pylint: disable=import-error
from salt.ext.six.moves import range # pylint: disable=redefined-builtin
try:
import win32com.client
import pythoncom
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
# pylint: enable=import-error
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this state module on Windows hosts where the win32com /
    pythoncom bindings imported successfully.
    '''
    return bool(salt.utils.is_windows() and HAS_DEPENDENCIES)
def _gather_update_categories(updateCollection):
    '''
    Return the distinct category names of every update contained in the
    given update collection (typically the ``download_collection``).

    Some known categories:
        Updates
        Windows 7
        Critical Updates
        Security Updates
        Update Rollups
    '''
    seen = []
    for update_idx in range(updateCollection.Count):
        update = updateCollection.Item(update_idx)
        for cat_idx in range(update.Categories.Count):
            cat_name = update.Categories.Item(cat_idx).Name
            if cat_name in seen:
                continue
            # only log each category the first time it is encountered
            log.debug('found category: {0}'.format(cat_name))
            seen.append(cat_name)
    return seen
class PyWinUpdater(object):
    '''
    Thin wrapper around the Windows Update Agent COM API.

    Drives the full update lifecycle: building a search filter from the
    ``skip*`` flags, searching for applicable updates, downloading them,
    installing them and reporting per-update results.
    '''
    def __init__(self, categories=None, skipUI=True, skipDownloaded=False,
                 skipInstalled=True, skipReboot=False, skipPresent=False,
                 skipSoftwareUpdates=False, skipDriverUpdates=False, skipHidden=True):
        log.debug('CoInitializing the pycom system')
        # COM must be initialized on this thread before any Dispatch call.
        pythoncom.CoInitialize()
        # pylint: disable=invalid-name
        self.skipUI = skipUI
        self.skipDownloaded = skipDownloaded
        self.skipInstalled = skipInstalled
        self.skipReboot = skipReboot
        self.skipPresent = skipPresent
        self.skipHidden = skipHidden
        self.skipSoftwareUpdates = skipSoftwareUpdates
        self.skipDriverUpdates = skipDriverUpdates
        # categories to restrict the search to; None means all categories.
        self.categories = categories
        # categories actually found by the last search (set in Search()).
        self.foundCategories = None
        # pylint: enable=invalid-name
        log.debug('dispatching update_session to keep the session object.')
        self.update_session = win32com.client.Dispatch('Microsoft.Update.Session')
        log.debug('update_session got. Now creating a win_searcher to seek out the updates')
        self.win_searcher = self.update_session.CreateUpdateSearcher()
        # list of updates that are applicable by current settings.
        self.download_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
        # list of updates to be installed.
        self.install_collection = win32com.client.Dispatch('Microsoft.Update.UpdateColl')
        # the object responsible for fetching the actual downloads.
        self.win_downloader = self.update_session.CreateUpdateDownloader()
        self.win_downloader.Updates = self.download_collection
        # the object responsible for the installing of the updates.
        self.win_installer = self.update_session.CreateUpdateInstaller()
        self.win_installer.Updates = self.install_collection
        # the results of the download process
        self.download_results = None
        # the results of the installation process
        self.install_results = None

    def Search(self, searchString):
        '''
        Search for updates matching ``searchString`` and add the matching,
        unattended-installable updates to ``download_collection``.

        Returns ``True`` on success, or the caught exception on failure.
        '''
        try:
            log.debug('beginning search of the passed string: {0}'.format(searchString))
            self.search_results = self.win_searcher.Search(searchString)
            log.debug('search completed successfully.')
        except Exception as exc:
            log.info('search for updates failed. {0}'.format(exc))
            return exc

        log.debug('parsing results. {0} updates were found.'.format(
            self.search_results.Updates.Count))
        try:
            for update in self.search_results.Updates:
                # updates requiring user input cannot be installed
                # unattended, so they are always skipped.
                if update.InstallationBehavior.CanRequestUserInput:
                    log.debug('Skipped update {0}'.format(update))
                    continue
                for category in update.Categories:
                    if self.skipDownloaded and update.IsDownloaded:
                        continue
                    if self.categories is None or category.Name in self.categories:
                        self.download_collection.Add(update)
                        log.debug('added update {0}'.format(update))
            self.foundCategories = _gather_update_categories(self.download_collection)
            return True
        except Exception as exc:
            log.info('parsing updates failed. {0}'.format(exc))
            return exc

    def AutoSearch(self):
        '''
        Build a search criteria string from the instance's ``skip*`` flags
        and run :meth:`Search` with it.

        Returns ``False`` when both software and driver updates are skipped
        (nothing left to search for), otherwise whatever :meth:`Search`
        returns.
        '''
        search_string = ''
        searchParams = []
        if self.skipInstalled:
            searchParams.append('IsInstalled=0')
        else:
            searchParams.append('IsInstalled=1')
        if self.skipHidden:
            searchParams.append('IsHidden=0')
        else:
            searchParams.append('IsHidden=1')
        if self.skipReboot:
            searchParams.append('RebootRequired=0')
        else:
            searchParams.append('RebootRequired=1')
        if self.skipPresent:
            searchParams.append('IsPresent=0')
        else:
            searchParams.append('IsPresent=1')
        if len(searchParams) > 1:
            for i in searchParams:
                search_string += '{0} and '.format(i)
        else:
            # BUG FIX: this previously indexed searchParams[1], which would
            # raise IndexError for a single-element list; the only entry is
            # at index 0.
            search_string += '{0} and '.format(searchParams[0])

        if not self.skipSoftwareUpdates and not self.skipDriverUpdates:
            search_string += 'Type=\'Software\' or Type=\'Driver\''
        elif not self.skipSoftwareUpdates:
            search_string += 'Type=\'Software\''
        elif not self.skipDriverUpdates:
            search_string += 'Type=\'Driver\''
        else:
            # if there is no type, there is nothing to search.
            return False
        log.debug('generated search string: {0}'.format(search_string))
        return self.Search(search_string)

    def Download(self):
        '''
        Download every update in ``download_collection`` that is not
        already cached.

        Returns ``True`` on success, or the caught exception on failure.
        '''
        try:
            if self.download_collection.Count != 0:
                self.download_results = self.win_downloader.Download()
            else:
                log.debug('Skipped downloading, all updates were already cached.')
            return True
        except Exception as exc:
            log.debug('failed in the downloading {0}.'.format(exc))
            return exc

    def Install(self):
        '''
        Install every downloaded update from the last search.

        Returns ``True`` on success (or when there is nothing to install),
        or the caught exception on failure.
        '''
        try:
            for update in self.search_results.Updates:
                if update.IsDownloaded:
                    self.install_collection.Add(update)
            log.debug('Updates prepared. beginning installation')
        except Exception as exc:
            log.info('Preparing install list failed: {0}'.format(exc))
            return exc

        if self.install_collection.Count != 0:
            log.debug('Install list created, about to install')
            # (removed an unused ``updates = []`` local that was never read)
            try:
                self.install_results = self.win_installer.Install()
                log.info('Installation of updates complete')
                return True
            except Exception as exc:
                log.info('Installation failed: {0}'.format(exc))
                return exc
        else:
            log.info('no new updates.')
            return True

    def GetInstallationResults(self):
        '''
        Return a dict mapping ``'update N'`` to ``'<result code>: <title>'``
        for every update in ``install_collection``.
        '''
        log.debug('bluger has {0} updates in it'.format(self.install_collection.Count))
        updates = []
        if self.install_collection.Count == 0:
            return {}
        for i in range(self.install_collection.Count):
            updates.append('{0}: {1}'.format(
                self.install_results.GetUpdateResult(i).ResultCode,
                self.install_collection.Item(i).Title))
        log.debug('Update results enumerated, now making a list to pass back')
        results = {}
        for i, update in enumerate(updates):
            results['update {0}'.format(i)] = update
        log.debug('Update information complied. returning')
        return results

    def GetDownloadResults(self):
        '''
        Return a dict mapping ``'update N'`` to ``'<result code>: <title>'``
        for every update in ``download_collection``.
        '''
        updates = []
        for i in range(self.download_collection.Count):
            updates.append('{0}: {1}'.format(
                self.download_results.GetUpdateResult(i).ResultCode,
                self.download_collection.Item(i).Title))
        results = {}
        for i, update in enumerate(updates):
            results['update {0}'.format(i)] = update
        return results

    def SetCategories(self, categories):
        # categories to restrict future searches to (None = all).
        self.categories = categories

    def GetCategories(self):
        return self.categories

    def GetAvailableCategories(self):
        # categories discovered by the most recent search, or None.
        return self.foundCategories

    def SetSkips(self, skips):
        '''
        Apply a list of single-entry ``{feature: bool}`` mappings to the
        skip flags via :meth:`SetSkip`.
        '''
        if skips:
            for i in skips:
                # ``dict.iterkeys()`` is Python 2 only; iterating the dict
                # itself walks keys on both Python 2 and 3.
                skip = next(iter(i))
                value = i[skip]
                self.SetSkip(skip, value)
                log.debug('was asked to set {0} to {1}'.format(skip, value))

    def SetSkip(self, skip, state):
        '''
        Set a single named skip flag (e.g. ``'UI'``, ``'downloaded'``) to
        ``state``. Unknown names are silently ignored.
        '''
        if skip == 'UI':
            self.skipUI = state
        elif skip == 'downloaded':
            self.skipDownloaded = state
        elif skip == 'installed':
            self.skipInstalled = state
        elif skip == 'reboot':
            self.skipReboot = state
        elif skip == 'present':
            self.skipPresent = state
        elif skip == 'hidden':
            self.skipHidden = state
        elif skip == 'software':
            self.skipSoftwareUpdates = state
        elif skip == 'driver':
            self.skipDriverUpdates = state
        log.debug('new search state: \n\tUI: {0}\n\tDownload: {1}\n\tInstalled: {2}\n\treboot :{3}\n\tPresent: {4}\n\thidden: {5}\n\tsoftware: {6}\n\tdriver: {7}'.format(
            self.skipUI, self.skipDownloaded, self.skipInstalled, self.skipReboot,
            self.skipPresent, self.skipHidden, self.skipSoftwareUpdates, self.skipDriverUpdates))
def _search(win_updater, retries=5):
    '''
    Run the updater's automatic search, retrying on failure.

    Returns a ``(comment, passed, retries)`` tuple: the accumulated human
    readable log, whether a search eventually succeeded, and the number of
    retries remaining.
    '''
    passed = False
    clean = True
    comment = ''
    while not passed:
        log.debug('Searching. tries left: {0}'.format(retries))
        # AutoSearch returns True, False (nothing to search), or an
        # exception instance on failure.
        passed = win_updater.AutoSearch()
        log.debug('Done searching: {0}'.format(passed))
        if isinstance(passed, Exception):
            clean = False
            comment += 'Failed in the seeking/parsing process:\n\t\t{0}\n'.format(passed)
            retries -= 1
            if retries:
                comment += '{0} tries to go. retrying\n'.format(retries)
                passed = False
            else:
                comment += 'out of retries. this update round failed.\n'
                # BUG FIX: this previously returned True here, making
                # callers treat an exhausted, failed search as a success
                # and carry on to download/install.
                return (comment, False, retries)
            passed = False
    if clean:
        comment += 'Search was done with out an error.\n'
    return (comment, True, retries)
def _download(win_updater, retries=5):
    '''
    Ask the updater to download everything it found, retrying on failure.

    Returns a ``(comment, passed, retries)`` tuple: the accumulated human
    readable log, whether the download succeeded, and the retries left.
    '''
    attempts_left = retries
    messages = []
    succeeded = False
    had_error = False
    while not succeeded:
        log.debug('Downloading. tries left: {0}'.format(attempts_left))
        succeeded = win_updater.Download()
        log.debug('Done downloading: {0}'.format(succeeded))
        if isinstance(succeeded, Exception):
            had_error = True
            messages.append('Failed while trying to download updates:\n\t\t{0}\n'.format(succeeded))
            attempts_left -= 1
            if not attempts_left:
                messages.append('out of retries. this update round failed.\n')
                return (''.join(messages), False, attempts_left)
            messages.append('{0} tries to go. retrying\n'.format(attempts_left))
            succeeded = False
    if not had_error:
        messages.append('Download was done without error.\n')
    return (''.join(messages), True, attempts_left)
def _install(win_updater, retries=5):
    '''
    Install the downloaded updates, retrying on failure.

    Returns a ``(comment, passed, retries)`` tuple: the accumulated human
    readable log, whether the install succeeded, and the retries left.
    '''
    tries_left = retries
    parts = []
    done = False
    errored = False
    while not done:
        log.debug('download_collection is this long: {0}'.format(win_updater.install_collection.Count))
        log.debug('Installing. tries left: {0}'.format(tries_left))
        done = win_updater.Install()
        log.info('Done installing: {0}'.format(done))
        if isinstance(done, Exception):
            errored = True
            parts.append('Failed while trying to install the updates.\n\t\t{0}\n'.format(done))
            tries_left -= 1
            if not tries_left:
                parts.append('out of retries. this update round failed.\n')
                return (''.join(parts), False, tries_left)
            parts.append('{0} tries to go. retrying\n'.format(tries_left))
            done = False
    if not errored:
        parts.append('Install was done without error.\n')
    return (''.join(parts), True, tries_left)
def installed(name, categories=None, skips=None, retries=10):
    '''
    Install specified windows updates.

    name:
        when ``categories`` is empty, the name itself is used as the single
        category to search for. They are separate because a state can only
        have one name but may target several categories.

    categories:
        the list of categories to install. These are plain strings from the
        update's metadata, so there is no fixed enumeration. Some known
        categories:

        .. code-block:: text

            Updates
            Windows 7
            Critical Updates
            Security Updates
            Update Rollups

    skips:
        a list of update features to filter by. Available features:

        .. code-block:: text

            'UI' - User interaction required, skipped by default
            'downloaded' - Already downloaded, skipped by default (downloading)
            'present' - Present on computer, included by default (installing)
            'installed' - Already installed, skipped by default
            'reboot' - Reboot required, included by default
            'hidden' - skip those updates that have been hidden.
            'software' - Software updates, included by default
            'driver' - driver updates, skipped by default

    retries
        Number of retries to make before giving up. This is total, not per
        step.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    # a bare name doubles as the single category to search for
    if not categories:
        categories = [name]
    log.debug('categories to search for are: {0}'.format(categories))
    win_updater = PyWinUpdater()
    win_updater.SetCategories(categories)
    win_updater.SetSkips(skips)
    # run search, download and install in order, bailing out on the first
    # step that fails
    for step in (_search, _download, _install):
        comment, passed, retries = step(win_updater, retries)
        ret['comment'] += comment
        if not passed:
            ret['result'] = False
            return ret
    try:
        ret['changes'] = win_updater.GetInstallationResults()
    except Exception:
        ret['comment'] += 'could not get results, but updates were installed.'
    return ret
def downloaded(name, categories=None, skips=None, retries=10):
    '''
    Cache updates for later install.

    name:
        when ``categories`` is empty, the name itself is used as the single
        category to search for. They are separate because a state can only
        have one name but may target several categories.

    categories:
        the list of categories to download. These are plain strings from the
        update's metadata, so there is no fixed enumeration. Some known
        categories:

        .. code-block:: text

            Updates
            Windows 7
            Critical Updates
            Security Updates
            Update Rollups

    skips:
        a list of update features to filter by. Available features:

        .. code-block:: text

            'UI' - User interaction required, skipped by default
            'downloaded' - Already downloaded, skipped by default (downloading)
            'present' - Present on computer, included by default (installing)
            'installed' - Already installed, skipped by default
            'reboot' - Reboot required, included by default
            'hidden' - skip those updates that have been hidden.
            'software' - Software updates, included by default
            'driver' - driver updates, skipped by default

    retries
        Number of retries to make before giving up. This is total, not per
        step.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}
    # a bare name doubles as the single category to search for
    if not categories:
        categories = [name]
    log.debug('categories to search for are: {0}'.format(categories))
    win_updater = PyWinUpdater()
    win_updater.SetCategories(categories)
    win_updater.SetSkips(skips)
    # search, then download (no install), bailing out on a failed step
    for step in (_search, _download):
        comment, passed, retries = step(win_updater, retries)
        ret['comment'] += comment
        if not passed:
            ret['result'] = False
            return ret
    try:
        ret['changes'] = win_updater.GetDownloadResults()
    except Exception:
        ret['comment'] += 'could not get results, but updates were downloaded.'
    return ret
| |
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import subtitles
from .language import Language
from .utils import to_unicode
import enzyme.core
import guessit
import hashlib
import logging
import mimetypes
import os
import struct
from sickbeard import encodingKludge as ek
import sickbeard
__all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo',
'scan', 'hash_opensubtitles', 'hash_thesubdb']
logger = logging.getLogger("subliminal")
#: Video extensions
EXTENSIONS = ['.avi', '.mkv', '.mpg', '.mp4', '.m4v', '.mov', '.ogm', '.ogv', '.wmv',
'.divx', '.asf']
#: Video mimetypes
MIMETYPES = ['video/mpeg', 'video/mp4', 'video/quicktime', 'video/x-ms-wmv', 'video/x-msvideo',
'video/x-flv', 'video/x-matroska', 'video/x-matroska-3d']
class Video(object):
    """Base class for videos

    Holds the release name, the guessed metadata and, when the file exists
    on disk, its size and content hashes.

    :param string path: path
    :param guess: guessed informations
    :type guess: :class:`~guessit.guess.Guess`
    :param string imdbid: imdbid

    """
    def __init__(self, path, guess, imdbid=None):
        # keep the raw path as the release name even if the file is missing
        self.release = path
        self.guess = guess
        self.imdbid = imdbid
        self._path = None
        # mapping of hash algorithm name -> hash value, filled below and in
        # the path setter
        self.hashes = {}
        if os.path.exists(path):
            self._path = path
            self.size = os.path.getsize(self._path)
            self._compute_hashes()

    @classmethod
    def from_path(cls, path):
        """Create a :class:`Video` subclass guessing all informations from the given path

        :param string path: path
        :return: video object
        :rtype: :class:`Episode` or :class:`Movie` or :class:`UnknownVideo`

        """
        guess = guessit.guess_file_info(path, 'autodetect')
        result = None
        # an episode needs series + season + episode number to be usable
        if guess['type'] == 'episode' and 'series' in guess and 'season' in guess and 'episodeNumber' in guess:
            title = None
            if 'title' in guess:
                title = guess['title']
            result = Episode(path, guess['series'], guess['season'], guess['episodeNumber'], title, guess)
        if guess['type'] == 'movie' and 'title' in guess:
            year = None
            if 'year' in guess:
                year = guess['year']
            result = Movie(path, guess['title'], year, guess)
        if not result:
            result = UnknownVideo(path, guess)
        # when called on a subclass (e.g. Episode.from_path), reject results
        # of any other type
        if not isinstance(result, cls):
            raise ValueError('Video is not of requested type')
        return result

    @property
    def exists(self):
        """Whether the video exists or not"""
        if self._path:
            return os.path.exists(self._path)
        return False

    @property
    def path(self):
        """Path to the video"""
        return self._path

    @path.setter
    def path(self, value):
        # setting a path recomputes size and hashes for the new file
        if not os.path.exists(value):
            raise ValueError('Path does not exists')
        self._path = value
        self.size = os.path.getsize(self._path)
        self._compute_hashes()

    def _compute_hashes(self):
        """Compute different hashes"""
        self.hashes['OpenSubtitles'] = hash_opensubtitles(self.path)
        self.hashes['TheSubDB'] = hash_thesubdb(self.path)

    def scan(self):
        """Scan and return associated subtitles

        Collects both subtitles embedded in the container (via enzyme) and
        external subtitle files next to the video (and in the configured
        sickbeard subtitle directories).

        :return: associated subtitles
        :rtype: list of :class:`~subliminal.subtitles.Subtitle`

        """
        if not self.exists:
            return []
        basepath = os.path.splitext(self.path)[0]
        results = []
        video_infos = None
        try:
            video_infos = enzyme.parse(self.path)
            logger.debug(u'Succeeded parsing %s with enzyme: %r' % (self.path, video_infos))
        except:
            # best-effort: a container that enzyme cannot parse simply
            # yields no embedded subtitles
            logger.debug(u'Failed parsing %s with enzyme' % self.path)
        if isinstance(video_infos, enzyme.core.AVContainer):
            results.extend([subtitles.EmbeddedSubtitle.from_enzyme(self.path, s) for s in video_infos.subtitles])
        # cannot use glob here because it chokes if there are any square
        # brackets inside the filename, so we have to use basic string
        # startswith/endswith comparisons
        folder, basename = os.path.split(basepath)
        if folder == '':
            folder = '.'
        existing = [f for f in os.listdir(folder) if f.startswith(basename)]
        if sickbeard.SUBTITLES_DIR:
            subsDir = ek.ek(os.path.join, folder, sickbeard.SUBTITLES_DIR)
            if ek.ek(os.path.isdir, subsDir):
                existing.extend([f for f in os.listdir(subsDir) if f.startswith(basename)])
        if sickbeard.SUBTITLES_DIR_SUB:
            subsDir = os.path.join(os.path.dirname(self._path),"Subs")
            if ek.ek(os.path.isdir, subsDir):
                existing.extend([f for f in os.listdir(subsDir) if f.startswith(basename)])
        for path in existing:
            for ext in subtitles.EXTENSIONS:
                if path.endswith(ext):
                    # language code is whatever sits between the video
                    # basename and the subtitle extension
                    language = Language(path[len(basename) + 1:-len(ext)], strict=False)
                    results.append(subtitles.ExternalSubtitle(path, language))
        return results

    def __unicode__(self):
        return to_unicode(self.path or self.release)

    def __str__(self):
        # Python 2: delegate to __unicode__ and encode
        return unicode(self).encode('utf-8')

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self)

    def __hash__(self):
        return hash(self.path or self.release)
class Episode(Video):
    """A TV episode :class:`Video`.

    :param string path: path to the video file
    :param string series: name of the series
    :param int season: season number
    :param int episode: episode number
    :param string title: episode title, if known
    :param guess: guessed informations
    :type guess: :class:`~guessit.guess.Guess`
    :param string tvdbid: tvdbid
    :param string imdbid: imdbid

    """
    def __init__(self, path, series, season, episode, title=None, guess=None, tvdbid=None, imdbid=None):
        super(Episode, self).__init__(path, guess, imdbid)
        self.series = series
        self.season = season
        self.episode = episode
        self.title = title
        self.tvdbid = tvdbid
class Movie(Video):
    """A movie :class:`Video`.

    :param string path: path to the video file
    :param string title: movie title
    :param int year: release year, if known
    :param guess: guessed informations
    :type guess: :class:`~guessit.guess.Guess`
    :param string imdbid: imdbid

    """
    def __init__(self, path, title, year=None, guess=None, imdbid=None):
        super(Movie, self).__init__(path, guess, imdbid)
        self.year = year
        self.title = title
class UnknownVideo(Video):
    """A :class:`Video` that could not be identified as an episode or a movie"""
    pass
def scan(entry, max_depth=3, scan_filter=None, depth=0):
    """Scan a path for videos and subtitles

    :param string entry: path
    :param int max_depth: maximum folder depth
    :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
    :param int depth: starting depth
    :return: found videos and subtitles
    :rtype: list of (:class:`Video`, [:class:`~subliminal.subtitles.Subtitle`])

    """
    # stop recursing past max_depth; max_depth == 0 means no limit
    if depth > max_depth and max_depth != 0:
        return []
    if os.path.isdir(entry):
        # directory: recurse into every child with incremented depth
        logger.debug(u'Scanning directory %s with depth %d/%d' % (entry, depth, max_depth))
        found = []
        for child in os.listdir(entry):
            found.extend(scan(os.path.join(entry, child), max_depth, scan_filter, depth + 1))
        return found
    if os.path.isfile(entry) or depth == 0:
        logger.debug(u'Scanning file %s with depth %d/%d' % (entry, depth, max_depth))
        if depth != 0:
            # trust the user: a path given directly (depth 0) skips the
            # format and filter checks
            if mimetypes.guess_type(entry)[0] not in MIMETYPES and os.path.splitext(entry)[1] not in EXTENSIONS:
                return []
            if scan_filter is not None and scan_filter(entry):
                return []
        video = Video.from_path(entry)
        return [(video, video.scan())]
    # anything else (broken symlink, device node, ...)
    logger.warning(u'Scanning entry %s failed with depth %d/%d' % (entry, depth, max_depth))
    return []
def hash_opensubtitles(path):
    """Compute a hash using OpenSubtitles' algorithm

    The hash is the file size plus the 64-bit sums of the first and last
    64 KiB of the file, truncated to 64 bits.

    :param string path: path
    :return: hash as a 16-character lowercase hex string, or ``None`` if
        the file is too small to be hashed
    :rtype: string

    """
    longlongformat = 'q'  # long long
    bytesize = struct.calcsize(longlongformat)
    with open(path, 'rb') as f:
        filesize = os.path.getsize(path)
        filehash = filesize
        if filesize < 65536 * 2:
            # the algorithm needs two full 64 KiB chunks
            return None
        # BUG FIX: use integer division -- plain '/' yields a float under
        # Python 3 true division, which range() rejects
        for _ in range(65536 // bytesize):
            filebuffer = f.read(bytesize)
            (l_value,) = struct.unpack(longlongformat, filebuffer)
            filehash += l_value
            filehash = filehash & 0xFFFFFFFFFFFFFFFF  # to remain as 64bit number
        f.seek(max(0, filesize - 65536), 0)
        for _ in range(65536 // bytesize):
            filebuffer = f.read(bytesize)
            (l_value,) = struct.unpack(longlongformat, filebuffer)
            filehash += l_value
            filehash = filehash & 0xFFFFFFFFFFFFFFFF
    returnedhash = '%016x' % filehash
    logger.debug(u'Computed OpenSubtitle hash %s for %s' % (returnedhash, path))
    return returnedhash
def hash_thesubdb(path):
    """Compute a hash using TheSubDB's algorithm

    The hash is the MD5 of the first and the last 64 KiB of the file.

    :param string path: path
    :return: hash, or ``None`` if the file is smaller than 64 KiB
    :rtype: string

    """
    readsize = 64 * 1024
    if os.path.getsize(path) < readsize:
        return None
    with open(path, 'rb') as f:
        head = f.read(readsize)
        f.seek(-readsize, os.SEEK_END)
        tail = f.read(readsize)
    returnedhash = hashlib.md5(head + tail).hexdigest()
    logger.debug(u'Computed TheSubDB hash %s for %s' % (returnedhash, path))
    return returnedhash
| |
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.core import mail
from django.test import TestCase
from django.utils import timezone
from ..models import *
from ...core import permissions as perms
import datetime
import json
class UserStoriesTests(TestCase):
    def setUp(self):
        # Reference timestamp used for milestone deadlines below.
        self.now_date = datetime.datetime.now(tz=timezone.get_default_timezone())
        # user1: superuser, owner of project1; logs in with password "test".
        self.user1 = User.objects.create(
            username = 'test1',
            email = 'test1@test.com',
            is_active = True,
            is_staff = True,
            is_superuser = True,
        )
        self.user1.set_password("test")
        self.user1.save()
        # user2 and user3: regular users; they reuse user1's password hash
        # so all three accounts accept the password "test".
        self.user2 = User.objects.create(
            username = 'test2',
            email = 'test2@test.com',
            is_active = True,
            is_staff = False,
            is_superuser = False,
            password = self.user1.password,
        )
        self.user3 = User.objects.create(
            username = 'test3',
            email = 'test3@test.com',
            is_active = True,
            is_staff = False,
            is_superuser = False,
            password = self.user1.password,
        )
        # project1 has only user1 as member; project2 has user2 and user3.
        self.project1 = Project.objects\
            .create(name='test1', description='test1', owner=self.user1, slug='test1')
        self.project2 = Project.objects\
            .create(name='test2', description='test2', owner=self.user2, slug='test2')
        self.project1.add_user(self.user1, 'developer')
        self.project2.add_user(self.user2, 'developer')
        self.project2.add_user(self.user3, 'developer')
        # One milestone per project, due 20 days from now.
        self.milestone1 = Milestone.objects.create(
            project = self.project1,
            owner = self.user1,
            name = 'test1 milestone',
            estimated_finish = self.now_date + datetime.timedelta(20),
        )
        self.milestone2 = Milestone.objects.create(
            project = self.project2,
            owner = self.user2,
            name = 'test2 milestone',
            estimated_finish = self.now_date + datetime.timedelta(20),
        )
        # Drop any notification mail generated while building the fixtures,
        # so tests can assert on mails they trigger themselves.
        mail.outbox = []
def tearDown(self):
self.milestone1.delete()
self.milestone2.delete()
self.project1.delete()
self.project2.delete()
self.user2.delete()
self.user1.delete()
def test_backlog_simple_view(self):
self.client.login(username="test2", password="test")
response = self.client.get(self.project2.get_backlog_url())
self.assertEqual(response.status_code, 200)
def test_backlog_simple_view_without_permissions(self):
self.client.login(username="test2", password="test")
response = self.client.get(self.project1.get_backlog_url())
self.assertEqual(response.status_code, 403)
def test_user_story_create_view_page(self):
self.client.login(username="test2", password="test")
response = self.client.get(self.milestone2.get_user_story_create_url())
self.assertEqual(response.status_code, 200)
def test_user_story_create_view_page_without_permissions(self):
self.client.login(username="test2", password="test")
response = self.client.get(self.milestone1.get_user_story_create_url())
self.assertEqual(response.status_code, 403)
def test_user_story_create_with_milestone(self):
self.client.login(username="test2", password="test")
post_params = {
'priority': 3,
'points': '10',
'status': 'open',
'category': '',
'tested': False,
'subject': 'test us',
'description': 'test desc us',
'finish_date': '02/02/2012',
}
response = self.client.post(self.milestone2.get_user_story_create_url(), post_params, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain, [('http://testserver/test2/backlog/', 302)])
self.assertEqual(self.milestone2.user_stories.count(), 1)
self.assertEqual(self.project2.user_stories.count(), 1)
self.assertEqual(self.project2.tasks.count(), 0)
self.assertEqual(len(mail.outbox), 1)
def test_user_story_create(self):
self.client.login(username="test2", password="test")
post_params = {
'priority': 3,
'points': '10',
'status': 'open',
'category': '',
'tested': False,
'subject': 'test us',
'description': 'test desc us',
'finish_date': '02/02/2012',
}
response = self.client.post(self.project2.get_userstory_create_url(), post_params, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain, [('http://testserver/test2/backlog/', 302)])
self.assertEqual(self.milestone2.user_stories.count(), 0)
self.assertEqual(self.project2.user_stories.count(), 1)
self.assertEqual(self.project2.tasks.count(), 0)
self.assertEqual(len(mail.outbox), 1)
def test_user_story_create_bad_status(self):
self.client.login(username="test2", password="test")
post_params = {
'priority': 6,
'points': '10',
'category': '',
'subject': 'test us',
'description': '',
'finish_date': '02/02/2012',
}
response = self.client.post(self.milestone2.get_user_story_create_url(), post_params, follow=True)
self.assertIn("form", response.context)
form_errors = dict(response.context['form'].errors)
self.assertIn('description', form_errors)
self.assertEqual(response.status_code, 200)
def test_user_story_create_without_permissions(self):
self.client.login(username="test2", password="test")
post_params = {
'priority': 6,
'points': '10',
'status': 'foo',
'category': '',
'tested': False,
'subject': 'test us',
'description': 'test desc us',
'finish_date': '02/02/2012',
}
response = self.client.post(self.milestone1.get_user_story_create_url(), post_params, follow=True)
self.assertEqual(response.status_code, 403)
def test_user_story_view(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user2,
project = self.project2,
milestone = self.milestone2,
)
response = self.client.get(user_story.get_view_url())
self.assertEqual(response.status_code, 200)
def test_user_story_edit(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user2,
project = self.project2,
milestone = self.milestone2,
)
post_params = {
'priority': 6,
'points': '10',
'category': '',
'subject': 'test us foo',
'description': 'test desc us',
'finish_date': '02/02/2012',
}
response = self.client.post(user_story.get_edit_url(), post_params, follow=True)
self.assertEqual(response.status_code, 200)
user_story = UserStory.objects.get(pk=user_story.pk)
self.assertEqual(user_story.subject, 'test us foo')
self.assertEqual(user_story.status, 'open')
def test_user_story_edit_without_permission(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user1,
project = self.project1,
milestone = self.milestone1,
)
post_params = {
'priority': 6,
'points': '10',
'status': 'progress',
'category': '',
'tested': False,
'subject': 'test us foo',
'description': 'test desc us',
'finish_date': '02/02/2012',
}
response = self.client.post(user_story.get_edit_url(), post_params, follow=True)
self.assertEqual(response.status_code, 403)
def test_assign_user_story(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user2,
project = self.project2,
milestone = None,
)
response = self.client.post(user_story.get_assign_url(), {'mid': self.milestone2.id})
self.assertEqual(response.status_code, 200)
user_story = UserStory.objects.get(pk=user_story.pk)
self.assertEqual(user_story.milestone, self.milestone2)
def test_unassign_user_story(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user2,
project = self.project2,
milestone = self.milestone2,
)
response = self.client.post(user_story.get_unassign_url(), {})
self.assertEqual(response.status_code, 200)
user_story = UserStory.objects.get(pk=user_story.pk)
self.assertEqual(user_story.milestone, None)
def test_assign_user_story_without_permissions(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user1,
project = self.project1,
milestone = None,
)
response = self.client.post(user_story.get_assign_url(), {'mid': self.milestone2.id})
self.assertEqual(response.status_code, 403)
user_story = UserStory.objects.get(pk=user_story.pk)
self.assertEqual(user_story.milestone, None)
def test_unassign_user_story_without_permissions(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user1,
project = self.project1,
milestone = self.milestone1,
)
response = self.client.post(user_story.get_unassign_url(), {})
self.assertEqual(response.status_code, 403)
user_story = UserStory.objects.get(pk=user_story.pk)
self.assertEqual(user_story.milestone, self.milestone1)
def test_user_story_delete(self):
self.client.login(username="test2", password="test")
user_story = UserStory.objects.create(
priority = '6',
status = 'open',
category = '',
tested = False,
finish_date = self.now_date,
subject = 'test us',
description = 'test desc us',
owner = self.user2,
project = self.project2,
milestone = self.milestone2,
)
response = self.client.post(user_story.get_delete_url(), {})
self.assertEqual(response.status_code, 200)
jdata = json.loads(response.content)
self.assertIn("valid", jdata)
self.assertTrue(jdata['valid'])
| |
from threading import local
from django.template import Context, Template, TemplateSyntaxError
from django.test import SimpleTestCase, override_settings
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real
from ...utils import setup
from .base import MultipleLocaleActivationTestCase, extended_locale_paths
class I18nTransTagTests(SimpleTestCase):
    """Rendering tests for the ``{% trans %}`` template tag."""

    libraries = {'i18n': 'django.templatetags.i18n'}

    @setup({'i18n01': '{% load i18n %}{% trans \'xxxyyyxxx\' %}'})
    def test_i18n01(self):
        """simple translation of a string delimited by '."""
        output = self.engine.render_to_string('i18n01')
        self.assertEqual(output, 'xxxyyyxxx')

    @setup({'i18n02': '{% load i18n %}{% trans "xxxyyyxxx" %}'})
    def test_i18n02(self):
        """simple translation of a string delimited by "."""
        output = self.engine.render_to_string('i18n02')
        self.assertEqual(output, 'xxxyyyxxx')

    @setup({'i18n06': '{% load i18n %}{% trans "Page not found" %}'})
    def test_i18n06(self):
        """simple translation of a string to German"""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n06')
        self.assertEqual(output, 'Seite nicht gefunden')

    @setup({'i18n09': '{% load i18n %}{% trans "Page not found" noop %}'})
    def test_i18n09(self):
        """simple non-translation (only marking) of a string to German"""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n09')
        self.assertEqual(output, 'Page not found')

    @setup({'i18n20': '{% load i18n %}{% trans andrew %}'})
    def test_i18n20(self):
        """Translation of a plain string variable argument."""
        output = self.engine.render_to_string('i18n20', {'andrew': 'a & b'})
        self.assertEqual(output, 'a & b')

    @setup({'i18n22': '{% load i18n %}{% trans andrew %}'})
    def test_i18n22(self):
        """Translation of a variable marked safe."""
        output = self.engine.render_to_string('i18n22', {'andrew': mark_safe('a & b')})
        self.assertEqual(output, 'a & b')

    @setup({'i18n23': '{% load i18n %}{% trans "Page not found"|capfirst|slice:"6:" %}'})
    def test_i18n23(self):
        """Using filters with the {% trans %} tag (#5972)."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n23')
        self.assertEqual(output, 'nicht gefunden')

    @setup({'i18n24': '{% load i18n %}{% trans \'Page not found\'|upper %}'})
    def test_i18n24(self):
        """The translated literal is piped through the |upper filter."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n24')
        self.assertEqual(output, 'SEITE NICHT GEFUNDEN')

    @setup({'i18n25': '{% load i18n %}{% trans somevar|upper %}'})
    def test_i18n25(self):
        """A filtered variable argument is translated."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n25', {'somevar': 'Page not found'})
        self.assertEqual(output, 'SEITE NICHT GEFUNDEN')

    # trans tag with as var
    @setup({'i18n35': '{% load i18n %}{% trans "Page not found" as page_not_found %}{{ page_not_found }}'})
    def test_i18n35(self):
        """{% trans ... as var %} stores the translation in the context."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n35')
        self.assertEqual(output, 'Seite nicht gefunden')

    @setup({'i18n36': '{% load i18n %}'
                      '{% trans "Page not found" noop as page_not_found %}{{ page_not_found }}'})
    def test_i18n36(self):
        """noop combined with 'as' stores the untranslated string."""
        with translation.override('de'):
            output = self.engine.render_to_string('i18n36')
        self.assertEqual(output, 'Page not found')

    @setup({'template': '{% load i18n %}{% trans %}A}'})
    def test_syntax_error_no_arguments(self):
        """{% trans %} without arguments is a syntax error."""
        msg = "'trans' takes at least one argument"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')

    @setup({'template': '{% load i18n %}{% trans "Yes" badoption %}'})
    def test_syntax_error_bad_option(self):
        """An unknown option name is rejected."""
        msg = "Unknown argument for 'trans' tag: 'badoption'"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')

    @setup({'template': '{% load i18n %}{% trans "Yes" as %}'})
    def test_syntax_error_missing_assignment(self):
        """'as' without a target variable is rejected."""
        msg = "No argument provided to the 'trans' tag for the as option."
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')

    @setup({'template': '{% load i18n %}{% trans "Yes" as var context %}'})
    def test_syntax_error_missing_context(self):
        """'context' without a value is rejected."""
        msg = "No argument provided to the 'trans' tag for the context option."
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')

    @setup({'template': '{% load i18n %}{% trans "Yes" context as var %}'})
    def test_syntax_error_context_as(self):
        """'as' cannot be used as the value of the context option."""
        msg = "Invalid argument 'as' provided to the 'trans' tag for the context option"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')

    @setup({'template': '{% load i18n %}{% trans "Yes" context noop %}'})
    def test_syntax_error_context_noop(self):
        """'noop' cannot be used as the value of the context option."""
        msg = "Invalid argument 'noop' provided to the 'trans' tag for the context option"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')

    @setup({'template': '{% load i18n %}{% trans "Yes" noop noop %}'})
    def test_syntax_error_duplicate_option(self):
        """Repeating an option is rejected."""
        msg = "The 'noop' option was specified more than once."
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')

    @setup({'template': '{% load i18n %}{% trans "%s" %}'})
    def test_trans_tag_using_a_string_that_looks_like_str_fmt(self):
        """A literal resembling a printf placeholder is rendered verbatim."""
        output = self.engine.render_to_string('template')
        self.assertEqual(output, '%s')
class TranslationTransTagTests(SimpleTestCase):
    """Behavior of {% trans %} against real message catalogs."""

    @override_settings(LOCALE_PATHS=extended_locale_paths)
    def test_template_tags_pgettext(self):
        """{% trans %} takes message contexts into account (#14806)."""
        # Reset the cached translation machinery so the overridden
        # LOCALE_PATHS take effect within this test.
        trans_real._active = local()
        trans_real._translations = {}
        with translation.override('de'):
            # Nonexistent context...
            t = Template('{% load i18n %}{% trans "May" context "nonexistent" %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'May')
            # Existing context... using a literal
            t = Template('{% load i18n %}{% trans "May" context "month name" %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% trans "May" context "verb" %}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Kann')
            # Using a variable
            t = Template('{% load i18n %}{% trans "May" context message_context %}')
            rendered = t.render(Context({'message_context': 'month name'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% trans "May" context message_context %}')
            rendered = t.render(Context({'message_context': 'verb'}))
            self.assertEqual(rendered, 'Kann')
            # Using a filter
            t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
            rendered = t.render(Context({'message_context': 'MONTH NAME'}))
            self.assertEqual(rendered, 'Mai')
            t = Template('{% load i18n %}{% trans "May" context message_context|lower %}')
            rendered = t.render(Context({'message_context': 'VERB'}))
            self.assertEqual(rendered, 'Kann')
            # Using 'as'
            t = Template('{% load i18n %}{% trans "May" context "month name" as var %}Value: {{ var }}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Value: Mai')
            t = Template('{% load i18n %}{% trans "May" as var context "verb" %}Value: {{ var }}')
            rendered = t.render(Context())
            self.assertEqual(rendered, 'Value: Kann')
class MultipleLocaleActivationTransTagTests(MultipleLocaleActivationTestCase):
    """{% trans %} under multiple, nested locale activations."""

    def test_single_locale_activation(self):
        """
        Simple baseline behavior with one locale for all the supported i18n
        constructs.
        """
        with translation.override('fr'):
            self.assertEqual(Template("{% load i18n %}{% trans 'Yes' %}").render(Context({})), 'Oui')

    def test_multiple_locale_trans(self):
        """A template compiled under one locale renders in the active one."""
        with translation.override('de'):
            t = Template("{% load i18n %}{% trans 'No' %}")
        with translation.override(self._old_language), translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')

    def test_multiple_locale_deactivate_trans(self):
        """Same as above, with the first override deactivated on exit."""
        with translation.override('de', deactivate=True):
            t = Template("{% load i18n %}{% trans 'No' %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')

    def test_multiple_locale_direct_switch_trans(self):
        """Switching directly from one active locale to another."""
        with translation.override('de'):
            t = Template("{% load i18n %}{% trans 'No' %}")
        with translation.override('nl'):
            self.assertEqual(t.render(Context({})), 'Nee')
| |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import logging
import os
import requests
import socket
import sys
import traceback
import docker
from neutronclient.common import exceptions as n_exceptions
from neutronclient.v2_0 import client as client_v2
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from pybrctl import pybrctl
import pyroute2
# from midonet_kubernetes import actions
import actions
import exceptions
# Plugin log destination. kubelet invokes this executable and reads its
# stdout (see status()), so diagnostics go to a file instead.
LOG_PATH = '/var/log/midonet-kubernetes/plugin.log'
logging.basicConfig(filename=LOG_PATH, level=logging.DEBUG)
logger = logging.getLogger(__name__)
# mm-ctl is MidoNet's helper used to (un)bind a Neutron port to a host veth.
BINDING_EXECUTABLE = '/usr/bin/mm-ctl'
BIND = '--bind-port'
UNBIND = '--unbind-port'
# NOTE(review): the Neutron/Keystone credentials and endpoints below are
# hardcoded; move them to configuration before production use.
HOST = os.environ.get('OS_HOST', '')
ENDPOINT_URL = 'http://{0}:9696'.format(HOST)  # Neutron API endpoint
USERNAME = 'admin'
TENANT_NAME = 'admin'
PASSWORD = 'midonet'
AUTH_URL = 'http://{0}:35357/v2.0'.format(HOST)  # Keystone v2.0
# Where container network namespaces are exposed for pyroute2.
NETNS_PREFIX = '/var/run/netns/'
PROC_TEMPLATE = '/proc/{0}/ns/net'
# Single router shared by every pod namespace.
GLOBAL_ROUTER_NAME = 'midonet-kubernetes'
# GLOBAL_ROUTER_NAME = 'my_router'
SUBNET_RANGE = os.environ.get('SERVICE_CLUSTER_IP_RANGE', '192.168.3.0/24')
# NOTE(review): hardcoded API server address — confirm/configure per cluster.
KUBE_API_SERVER_HOST = '10.240.0.12'
KUBE_API_SERVER_PORT = '8080'
KUBE_API_SERVER_URL = 'http://{0}:{1}/api/v1'.format(
    KUBE_API_SERVER_HOST, KUBE_API_SERVER_PORT)
# Module-level clients shared by all actions.
neutron = client_v2.Client(endpoint_url=ENDPOINT_URL, timeout=30,
                           username=USERNAME, tenant_name=TENANT_NAME,
                           password=PASSWORD, auth_url=AUTH_URL)
neutron.format = 'json'
docker_client = docker.Client(base_url='unix:///var/run/docker.sock')
docker_bridge = pybrctl.Bridge("docker0")
def get_hostname():
    """Return the name of the host this plugin is running on."""
    return socket.gethostname()
def _get_short_docker_id(docker_id):
    """Return the short (12-character) form of a Docker container ID."""
    short_id = docker_id[:12]
    return short_id
def _get_network_name(pod_namespace, host_name):
    """Map a pod namespace to its Neutron network name.

    The host name is currently unused: one network is shared per namespace.
    """
    return pod_namespace
def _call_k8s_api(endpoint='/'):
    """GET *endpoint* from the Kubernetes API server and return parsed JSON."""
    url = KUBE_API_SERVER_URL + endpoint
    return requests.get(url).json()
def get_services(pod_namespace):
    """Return all services defined in *pod_namespace*."""
    endpoint = '/namespaces/{0}/services'.format(pod_namespace)
    return _call_k8s_api(endpoint)
def get_service(pod_namespace, service_name):
    """Return the service *service_name* defined in *pod_namespace*."""
    endpoint = '/namespaces/{0}/services/{1}'.format(pod_namespace,
                                                     service_name)
    return _call_k8s_api(endpoint)
def _get_networks_by_attrs(unique=True, **attrs):
    """List Neutron networks matching *attrs*.

    Raises DuplicatedResourceException when *unique* is set and more than
    one network matches.
    """
    response = neutron.list_networks(**attrs)
    matched = response.get('networks', [])
    if unique and len(matched) > 1:
        params = ', '.join(['{0}={1}'.format(k, v) for k, v in attrs.items()])
        raise exceptions.DuplicatedResourceException(
            "Multiple Neutron networks exist for the params {0}".format(params))
    return response['networks']
def _get_subnets_by_attrs(unique=True, **attrs):
    """List Neutron subnets matching *attrs*.

    Up to two matches are tolerated even when *unique* is set, because a
    network may carry both an IPv4 and an IPv6 subnet.
    """
    response = neutron.list_subnets(**attrs)
    matched = response.get('subnets', [])
    if unique and len(matched) > 2:  # subnets for IPv4 and/or IPv6
        params = ', '.join(['{0}={1}'.format(k, v) for k, v in attrs.items()])
        raise exceptions.DuplicatedResourceException(
            "Multiple Neutron subnets exist for the params {0} ".format(params))
    return response['subnets']
def _get_ports_by_attrs(unique=True, **attrs):
    """List Neutron ports matching *attrs*.

    Raises DuplicatedResourceException when *unique* is set and more than
    one port matches.
    """
    response = neutron.list_ports(**attrs)
    matched = response.get('ports', [])
    if unique and len(matched) > 1:
        params = ', '.join(['{0}={1}'.format(k, v) for k, v in attrs.items()])
        raise exceptions.DuplicatedResourceException(
            "Multiple Neutron ports exist for the params {0} ".format(params))
    return response['ports']
def _get_routers_by_attrs(unique=True, **attrs):
    """List Neutron routers matching *attrs*.

    Raises DuplicatedResourceException when *unique* is set and more than
    one router matches.
    """
    response = neutron.list_routers(**attrs)
    matched = response.get('routers', [])
    if unique and len(matched) > 1:
        params = ', '.join(['{0}={1}'.format(k, v) for k, v in attrs.items()])
        raise exceptions.DuplicatedResourceException(
            "Multiple Neutron routers exist for the params {0}".format(params))
    return response['routers']
def _get_vips_by_attrs(unique=True, **attrs):
    """List Neutron LBaaS vips matching *attrs*.

    Raises DuplicatedResourceException when *unique* is set and more than
    one vip matches.
    """
    response = neutron.list_vips(**attrs)
    matched = response.get('vips', [])
    if unique and len(matched) > 1:
        params = ', '.join(['{0}={1}'.format(k, v) for k, v in attrs.items()])
        raise exceptions.DuplicatedResourceException(
            "Multiple Neutron vips exist for the params {0} ".format(params))
    return response['vips']
def _get_pools_by_attrs(unique=True, **attrs):
    """List Neutron LBaaS pools matching *attrs*.

    Raises DuplicatedResourceException when *unique* is set and more than
    one pool matches.
    """
    response = neutron.list_pools(**attrs)
    matched = response.get('pools', [])
    if unique and len(matched) > 1:
        params = ', '.join(['{0}={1}'.format(k, v) for k, v in attrs.items()])
        raise exceptions.DuplicatedResourceException(
            "Multiple Neutron pools exist for the params {0} ".format(params))
    return response['pools']
def _get_members_by_attrs(unique=True, **attrs):
    """List Neutron LBaaS members matching *attrs*.

    Raises DuplicatedResourceException when *unique* is set and more than
    one member matches.
    """
    response = neutron.list_members(**attrs)
    matched = response.get('members', [])
    if unique and len(matched) > 1:
        params = ', '.join(['{0}={1}'.format(k, v) for k, v in attrs.items()])
        raise exceptions.DuplicatedResourceException(
            "Multiple Neutron members exist for the params {0} ".format(params))
    return response['members']
def _get_router_ports_by_subnet_id(neutron_subnet_id, neutron_port_list):
    """Filter *neutron_port_list* down to ports attached to the given subnet.

    A port matches when the subnet ID appears among its fixed IPs, or as the
    port's own top-level 'subnet_id' attribute.
    """
    matched = []
    for port in neutron_port_list:
        fixed_subnets = [fip['subnet_id']
                         for fip in port.get('fixed_ips', [])]
        if (neutron_subnet_id in fixed_subnets
                or neutron_subnet_id == port.get('subnet_id', '')):
            matched.append(port)
    return matched
def init():
    """Initializes the network plugin.

    This function is called when 'init' is given as the first argument.
    No actual initialization work is required; the call is only logged.
    """
    logger.info('Initialized the plugin')
def get_veth_name_for_container(container_info):
    """Returns the name of the veth interface associated with the container

    :param container_info: the container info dictionary returned by Docker API
    :returns: the veth name as string (empty string if not found)
    """
    logger.info(container_info)
    # Ensure the netns mount dir exists so the container's network namespace
    # can be exposed to this process by name.
    if not os.path.exists(NETNS_PREFIX):
        os.mkdir(NETNS_PREFIX)
    pid = container_info['State']['Pid']
    proc_dir = PROC_TEMPLATE.format(pid)
    netns_symlink_path = NETNS_PREFIX + str(pid)
    veth_name = ''
    try:
        if not os.path.exists(netns_symlink_path):
            # Symlink /proc/<pid>/ns/net into /var/run/netns so pyroute2's
            # NetNS can open the container's namespace.
            os.symlink(proc_dir, netns_symlink_path)
            logger.debug('Created a symlink {0}'.format(netns_symlink_path))
        container_netns = pyroute2.IPDB(nl=pyroute2.NetNS(str(pid)))
        main_netns = pyroute2.IPDB()
        try:
            logger.debug(container_netns.interfaces)
            # logger.debug(main_netns.interfaces)
            with container_netns.by_name['eth0'] as eth0:
                eth0_index = eth0['index']
                # NOTE(review): assumes the host-side veth peer always has
                # ifindex eth0_index + 1; this is fragile — confirm whether
                # the peer index can be queried directly instead.
                veth_index = eth0_index + 1
                with main_netns.by_index[veth_index] as veth:
                    veth_name = veth['ifname']
        finally:
            container_netns.release()
            main_netns.release()
    finally:
        # Always remove the temporary symlink.
        if os.path.exists(netns_symlink_path):
            os.remove(netns_symlink_path)
            logger.debug('Deleted the symlink {0}'.format(netns_symlink_path))
    return veth_name
def _get_or_create_subnet(container_info, neutron_network_id=''):
    """Return the Neutron subnet covering the container's address.

    The CIDR is derived from the container's IP address and prefix length as
    reported by Docker. The subnet is created on *neutron_network_id* when it
    does not exist yet; DHCP is disabled since Docker assigns addresses.
    """
    ip_address = container_info['NetworkSettings']['IPAddress']
    prefixlen = container_info['NetworkSettings']['IPPrefixLen']
    gateway_ip = container_info['NetworkSettings']['Gateway']
    # Normalize to the network address, e.g. 172.17.0.5/16 -> 172.17.0.0/16.
    cidr = netaddr.IPNetwork('/'.join([ip_address, str(prefixlen)]))
    subnet_network = str(cidr.network)
    subnet_cidr = '/'.join([subnet_network, str(cidr.prefixlen)])
    created_subnet = {}
    subnets = _get_subnets_by_attrs(cidr=str(subnet_cidr),
                                    network_id=neutron_network_id)
    if not subnets:
        new_subnet = {
            'network_id': neutron_network_id,
            'ip_version': cidr.version,
            'cidr': subnet_cidr,
            'gateway_ip': gateway_ip,
            'enable_dhcp': False,
        }
        created_subnet_response = neutron.create_subnet({'subnet': new_subnet})
        created_subnet = created_subnet_response['subnet']
    else:
        created_subnet = subnets[0]
        logger.debug('Reusing the existing subnet {0}'
                     .format(created_subnet['id']))
    return created_subnet
def _get_or_create_cluster_ip_subnet(neutron_network_id=''):
    """Return the Neutron subnet for the service cluster-IP range.

    Creates the subnet on *neutron_network_id* (DHCP disabled) when it does
    not exist yet; otherwise reuses the existing one.
    """
    existing = _get_subnets_by_attrs(cidr=SUBNET_RANGE,
                                     network_id=neutron_network_id)
    if existing:
        subnet = existing[0]
        logger.debug('Reusing the existing subnet {0}'.format(subnet['id']))
        return subnet
    ip_network = netaddr.IPNetwork(SUBNET_RANGE)
    request = {
        'network_id': neutron_network_id,
        'ip_version': ip_network.version,
        'cidr': SUBNET_RANGE,
        'enable_dhcp': False,
    }
    response = neutron.create_subnet({'subnet': request})
    return response['subnet']
def _get_or_create_router(pod_namespace):
    """Return the Neutron router named *pod_namespace*, creating it if missing."""
    router_name = pod_namespace
    existing = _get_routers_by_attrs(name=router_name)
    if existing:
        router = existing[0]
        logger.debug('Reusing the router {0}'.format(router['id']))
        return router
    response = neutron.create_router({'router': {'name': router_name}})
    router = response['router']
    logger.debug('Created the router {0}'.format(router))
    return router
def _get_or_create_pools_and_vips(service_name, subnet_id, service_spec):
    """Ensure a Neutron LBaaS pool and vip exist for each service port.

    :param service_name: K8s service name, used as the pool/vip name
    :param subnet_id: Neutron subnet the pool/vip live in (cluster IP subnet)
    :param service_spec: the 'spec' section of the K8s service definition
    :returns: tuple (pools, vips) of lists parallel to service_spec['ports']
    """
    cluster_ip = service_spec['clusterIP']
    ports = service_spec['ports']
    pools = []
    vips = []
    for port in ports:
        protocol = port['protocol']
        protocol_port = port['targetPort']
        # Reuse an existing pool for this service/protocol if one exists.
        neutron_pools = _get_pools_by_attrs(
            name=service_name, protocol=protocol, subnet_id=subnet_id)
        neutron_pool = {}
        if not neutron_pools:
            pool_request = {
                'pool': {
                    'name': service_name,
                    'protocol': protocol,
                    'subnet_id': subnet_id,
                    'lb_method': 'ROUND_ROBIN',
                },
            }
            neutron_pool_response = neutron.create_pool(pool_request)
            neutron_pool = neutron_pool_response['pool']
        else:
            neutron_pool = neutron_pools[0]
        pools.append(neutron_pool)
        pool_id = neutron_pool['id']
        # Same for the vip bound to the pool on the service's cluster IP.
        neutron_vips = _get_vips_by_attrs(
            name=service_name, protocol=protocol, subnet_id=subnet_id,
            pool_id=pool_id, address=cluster_ip)
        neutron_vip = {}
        if not neutron_vips:
            vip_request = {
                'vip': {
                    # name is not necessary unique and the service name is
                    # used for the group of the vips.
                    'name': service_name,
                    'pool_id': pool_id,
                    'subnet_id': subnet_id,
                    'address': cluster_ip,
                    'protocol': protocol,
                    'protocol_port': protocol_port,
                },
            }
            neutron_vip_response = neutron.create_vip(vip_request)
            neutron_vip = neutron_vip_response['vip']
        else:
            neutron_vip = neutron_vips[0]
        vips.append(neutron_vip)
    return (pools, vips)
def _get_ip_address_in_port(neutron_port):
    """Return the port's IPv4 address.

    Prefers an explicit 'ip_address' attribute on the port; otherwise returns
    the first IPv4 address among the port's fixed IPs, or '' if none exists.
    """
    ip_address = neutron_port.get('ip_address', '')
    if ip_address:
        return ip_address
    for fixed_ip in neutron_port.get('fixed_ips', []):
        candidate = fixed_ip['ip_address']
        if netaddr.IPAddress(candidate).version == 4:
            return candidate
    return ''
def _create_port(container_info, neutron_network_id,
                 neutron_subnet_id, pod_name):
    """Create a Neutron port mirroring the container's IP and MAC.

    The port is named after the pod so teardown()/status() can find it later.
    """
    settings = container_info['NetworkSettings']
    port_request = {
        'name': pod_name,
        'network_id': neutron_network_id,
        'mac_address': settings['MacAddress'],
        'fixed_ips': [{
            'subnet_id': neutron_subnet_id,
            'ip_address': settings['IPAddress'],
        }],
    }
    response = neutron.create_port({'port': port_request})
    return response['port']
def get_service_name(pod_name):
    """Derive the service name from *pod_name*.

    NOTE(review): assumes pod names carry a fixed 6-character suffix
    (e.g. '-ab1cd') appended to the service name — confirm against the
    cluster's pod naming scheme.
    """
    suffix_length = 6
    return pod_name[:-suffix_length]
def _emulate_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, neutron_port):
    """Register the pod's port as a load-balancer member of its service.

    Looks up (or creates) the Neutron LBaaS pool and vip for the service the
    pod belongs to, then adds the pod's IPv4 address as a pool member —
    emulating what kube-proxy would otherwise do.
    """
    service_name = get_service_name(pod_name)
    service = get_service(pod_namespace, service_name)
    service_spec = service['spec']
    pools, vips = _get_or_create_pools_and_vips(
        service_name, cluster_ip_subnet_id, service_spec)
    # NOTE(tfukushima): The current Neutron model assumes the single VIP can be
    # create under the same subnet, which is not true in K8s assumption. This
    # introduces the limitation that we support only the single "port" entity
    # in the "ports" section of the spec file.
    neutron_pool = pools[0]
    neutron_vip = vips[0]
    member_request = {
        'member': {
            'pool_id': neutron_pool['id'],
            'address': _get_ip_address_in_port(neutron_port),
            'protocol_port': neutron_vip['protocol_port'],
            'weight': 1,
        }
    }
    neutron_member_response = neutron.create_member(member_request)
    neutron_member = neutron_member_response['member']
    # The implicit string concatenation used to render "...associated with
    # thevip ..."; the missing space is restored here.
    logger.debug('Created a new member {0} for the pool {1} associated with '
                 'the vip {2}'
                 .format(neutron_member['id'], neutron_pool['id'],
                         neutron_vip['id']))
def _cleanup_emulated_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, port):
    """Remove the LBaaS member/vip/pool created for the pod's service.

    Best-effort teardown mirror of _emulate_kube_proxy(): deletes the pool
    member for this pod's address, then tries to delete the vip and the pool,
    tolerating Conflict errors when other pods still use them.
    """
    service_name = get_service_name(pod_name)
    pools = _get_pools_by_attrs(name=service_name, subnet_id=cluster_ip_subnet_id)
    vips = _get_vips_by_attrs(name=service_name, subnet_id=cluster_ip_subnet_id)
    if pools:
        neutron_pool = pools[0]
        pool_id = neutron_pool['id']
        address = _get_ip_address_in_port(port)
        members = _get_members_by_attrs(address=address, pool_id=pool_id)
        # Guard against an already-removed member instead of IndexError.
        if members:
            member = members[0]
            neutron.delete_member(member['id'])
        # Guard against a missing vip (previously vips[0] could raise
        # IndexError when the pool existed but the vip was already gone).
        if vips:
            vip_id = vips[0]['id']
            try:
                neutron.delete_vip(vip_id)
            except n_exceptions.Conflict:
                logger.info('The vip {0} is still in use.'.format(vip_id))
        try:
            neutron.delete_pool(pool_id)
        except n_exceptions.Conflict:
            logger.info('The pool {0} is still in use.'.format(pool_id))
    logger.debug('Successfully cleaned the emulated kube-proxy resources up')
@lockutils.synchronized('k8s-np-lock', lock_file_prefix='k8s-np-lock',
                        external=True, lock_path='/tmp/')
def setup(pod_namespace, pod_name, container_id):
    """Creates the network for the container.

    This function is called when 'setup' is given as the first argument.
    Steps: ensure the namespace network/subnet exist, attach them (and the
    cluster-IP subnet) to the global router, create a Neutron port mirroring
    the container's addressing, register it with the service LB, and bind
    the container's veth to the port via mm-ctl.
    """
    network = {}
    # Map Pod's namespace into Neutron network.
    network_name = _get_network_name(pod_namespace, get_hostname())
    networks = _get_networks_by_attrs(name=network_name)
    if not networks:
        created_network_response = neutron.create_network(
            {'network': {'name': network_name}})
        network = created_network_response['network']
        logger.debug('Created the network {0}'.format(network))
    else:
        network = networks[0]
        logger.debug('Reusing the network {0}'.format(network['id']))
    neutron_network_id = network['id']
    container_info = docker_client.inspect_container(container_id)
    # Create a new subnet if the corresponding one doesn't exist.
    subnet = _get_or_create_subnet(container_info, network['id'])
    # All namespaces share one global router.
    router = _get_or_create_router(GLOBAL_ROUTER_NAME)
    neutron_router_id = router['id']
    neutron_subnet_id = subnet['id']
    # Attach the pod subnet to the router unless already attached.
    filtered_ports = _get_ports_by_attrs(
        unique=False, device_owner='network:router_interface',
        device_id=neutron_router_id, network_id=neutron_network_id)
    router_ports = _get_router_ports_by_subnet_id(
        neutron_subnet_id, filtered_ports)
    if not router_ports:
        neutron.add_interface_router(
            neutron_router_id, {'subnet_id': neutron_subnet_id})
    else:
        logger.debug('The subnet {0} is already bound to the router'
                     .format(neutron_subnet_id))
    # Same for the cluster-IP (service) subnet.
    cluster_ip_subnet = _get_or_create_cluster_ip_subnet(network['id'])
    cluster_ip_subnet_id = cluster_ip_subnet['id']
    cluster_ip_router_ports = _get_router_ports_by_subnet_id(
        cluster_ip_subnet_id, filtered_ports)
    if not cluster_ip_router_ports:
        neutron.add_interface_router(
            neutron_router_id, {'subnet_id': cluster_ip_subnet_id})
    else:
        logger.debug('The cluster IP subnet {0} is already bound to the router'
                     .format(cluster_ip_subnet_id))
    port = _create_port(container_info, neutron_network_id,
                        neutron_subnet_id, pod_name)
    logger.debug('Created a new port {0}'.format(port['id']))
    _emulate_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, port)
    # Getting the veth name.
    veth_name = get_veth_name_for_container(container_info)
    # Detach the veth from docker0 so mm-ctl can bind it to the MidoNet port.
    docker_bridge.delif(veth_name)
    port_id = port['id']
    try:
        stdout, stderr = processutils.execute(
            BINDING_EXECUTABLE, BIND, port_id, veth_name,
            run_as_root=True)
    except processutils.ProcessExecutionError as ex:
        logger.error('Binding the port is failed: {0}'.format(ex))
        sys.exit(-1)
    logger.debug('Successfully bound the port {0} to {1}'
                 .format(port_id, veth_name))
@lockutils.synchronized('k8s-np-lock', lock_file_prefix='k8s-np-lock',
                        external=True, lock_path='/tmp/')
def teardown(pod_namespace, pod_name, container_id):
    """Destroys the network for the container.

    This function is called when 'teardown' is given as the first argument.
    Reverses setup(): unbind and delete the pod's port, clean up the
    emulated kube-proxy LB resources, then detach/delete the subnets and
    network when they are no longer in use.
    """
    network_name = _get_network_name(pod_namespace, get_hostname())
    filtered_networks = _get_networks_by_attrs(name=network_name)
    neutron_network_id = filtered_networks[0]['id']
    container_info = docker_client.inspect_container(container_id)
    filtered_ports = _get_ports_by_attrs(name=pod_name)
    if filtered_ports:
        port = filtered_ports[0]
        port_id = port['id']
        try:
            stdout, stderr = processutils.execute(
                BINDING_EXECUTABLE, UNBIND, port_id, run_as_root=True)
        except processutils.ProcessExecutionError as ex:
            logger.error('Unbinding the port is failed: {0}'.format(ex))
            sys.exit(-1)
        logger.debug('Successfully unbound the port {0}'.format(port_id))
        cluster_ip_subnet = _get_or_create_cluster_ip_subnet(neutron_network_id)
        cluster_ip_subnet_id = cluster_ip_subnet['id']
        # Remove the LB member/vip/pool created in setup().
        _cleanup_emulated_kube_proxy(pod_namespace, pod_name, cluster_ip_subnet_id, port)
        neutron.delete_port(port_id)
        logger.debug('Successfuly deleted the port {0}'.format(port_id))
    # NOTE(review): cluster_ip_subnet_id is only assigned inside the branch
    # above; if no port matched pod_name, its use below raises NameError —
    # confirm whether that case can occur in practice.
    subnet = _get_or_create_subnet(container_info, neutron_network_id)
    neutron_subnet_id = subnet['id']
    router = _get_or_create_router(GLOBAL_ROUTER_NAME)
    neutron_router_id = router['id']
    filtered_ports = _get_ports_by_attrs(
        unique=False, device_owner='network:router_interface',
        device_id=neutron_router_id, network_id=neutron_network_id)
    router_ports = _get_router_ports_by_subnet_id(neutron_subnet_id, filtered_ports)
    # Detach and delete the pod subnet once only the router port remains.
    if len(router_ports) == 1:
        neutron.remove_interface_router(
            neutron_router_id, {'subnet_id': neutron_subnet_id})
        logger.debug('The subnet {0} is unbound from the router {1}'
                     .format(neutron_subnet_id, neutron_router_id))
        try:
            neutron.delete_subnet(neutron_subnet_id)
            logger.debug('Deleted the subnet {0}'.format(neutron_subnet_id))
        except n_exceptions.Conflict as ex:
            logger.info('The subnet {0} is still in use.'
                        .format(neutron_subnet_id))
    # Same for the cluster-IP subnet.
    cluster_ip_router_ports = _get_router_ports_by_subnet_id(
        cluster_ip_subnet_id, filtered_ports)
    if len(cluster_ip_router_ports) == 1:
        neutron.remove_interface_router(
            neutron_router_id, {'subnet_id': cluster_ip_subnet_id})
        logger.debug('The cluster IP subnet {0} is unbound from the router {1}'
                     .format(cluster_ip_subnet_id, neutron_router_id))
        try:
            neutron.delete_subnet(cluster_ip_subnet_id)
            logger.debug('The cluseter IP subnet {0} is deleted successfully.'
                         .format(cluster_ip_subnet_id))
        except n_exceptions.Conflict:
            logger.info('The cluseter IP subnet {0} is still in use.'
                        .format(cluster_ip_subnet_id))
    # Finally try to remove the namespace network itself; Conflict means
    # other pods in the namespace still use it.
    try:
        neutron.delete_network(neutron_network_id)
    except n_exceptions.Conflict as ex:
        logger.info('The network {0} is still in use.'
                    .format(neutron_network_id))
    logger.debug('Deleleted the network {0}'.format(neutron_network_id))
def status(pod_namespace, pod_name, container_id):
    """Reports the status of the containers identified by the given information.

    This function is called when 'status' is given as the first argument.
    Writes a PodNetworkStatus JSON document to stdout (kubelet reads it), or
    nothing when the network/port cannot be found.
    """
    # Use the same namespace -> network-name mapping as setup()/teardown().
    # The previous code concatenated pod_namespace and the host name with no
    # separator, which never matched the network names created by setup().
    network_name = _get_network_name(pod_namespace, get_hostname())
    filtered_networks = _get_networks_by_attrs(name=network_name)
    if not filtered_networks:
        return
    network = filtered_networks[0]
    neutron_network_id = network['id']
    filtered_ports = _get_ports_by_attrs(
        name=pod_name, network_id=neutron_network_id)
    if not filtered_ports:
        return
    port = filtered_ports[0]
    ip_address = _get_ip_address_in_port(port)
    status_response = {
        "apiVersion": "v1beta1",
        "kind": "PodNetworkStatus",
    }
    status_response['ip'] = ip_address
    logger.debug('Sending the status of {0}, {1}: {2}'
                 .format(pod_name, network_name, status_response))
    sys.stdout.write(json.dumps(status_response))
def dispatch(action, pod_namespace=None, pod_name=None, container_id=None):
    """Run the actual action with the given arguments.

    Currently the following actions are supported.

    - init
    - setup <pod_namespace> <pod_name> <container_id>
    - teardown <pod_namespace> <pod_name> <container_id>
    - status <pod_namespace> <pod_name> <container_id>

    After executing the action, it exits with the return code 0. Otherwise it
    exits with the non-zero return code.

    See the following link for more details.

    - https://godoc.org/github.com/kubernetes/kubernetes/pkg/kubelet/network/exec # noqa
    """
    if action == actions.INIT:
        logger.debug('init is called.')
        init()
    elif action == actions.SETUP:
        logger.debug('setup is called.')
        setup(pod_namespace, pod_name, container_id)
    elif action == actions.TEARDOWN:
        logger.debug('teardown is called.')
        teardown(pod_namespace, pod_name, container_id)
    elif action == actions.STATUS:
        logger.debug('status is called.')
        status(pod_namespace, pod_name, container_id)
    else:
        # Previously an unknown action silently fell through and exited 0,
        # contradicting the documented contract. Raising here lets
        # _dispatch_log() log it and exit with a non-zero return code.
        raise ValueError('Unknown action: {0}'.format(action))
    sys.exit(0)
def _dispatch_log():
    """Parse sys.argv, run the requested action and exit with a status code.

    Exits 0 when the action ran through (dispatch() terminates with
    ``sys.exit(0)``, which is caught here as SystemExit) and -1 when an
    unexpected exception was raised; that exception is logged together with
    its traceback before exiting.
    """
    args = sys.argv
    action = args[1]
    # Fixed off-by-one guards: an argument at index i exists whenever
    # len(args) > i.  The previous ``args[2] if len(args) > 3`` style
    # silently dropped the last supplied argument.
    pod_namespace = args[2] if len(args) > 2 else None
    pod_name = args[3] if len(args) > 3 else None
    container_id = args[4] if len(args) > 4 else None
    logger.debug("MidoNet plugin executable was called with action: {0}, "
                 "pod_namespace: {1}, pod_name: {2}, container_id: {3}"
                 .format(action, pod_namespace, pod_name, container_id))
    return_code = 0
    try:
        dispatch(action, pod_namespace=pod_namespace,
                 pod_name=pod_name,
                 container_id=container_id)
    # ``except ... as`` replaces the Python-2-only ``except X, e`` form; it
    # works on Python >= 2.6 and is required for Python 3.
    except SystemExit as e:
        return_code = e.code
    except Exception as e:
        logger.error("Unhandled exception: %s", e)
        logger.error(traceback.format_exc())
        return_code = -1
    finally:
        logger.debug("MidoNet plugin succeeded to be executed: %s",
                     return_code)
    sys.exit(return_code)
# Entry point when installed as a kubelet exec network-plugin executable.
if __name__ == '__main__':
    _dispatch_log()
| |
from __future__ import division, print_function
import imp
import os
import sys
import shutil
import pickle
import copy
import warnings
import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from setup_common import *
# Compile numpy.core from multiple files instead of one big translation unit
# (experimental).  Defaults to on; set NPY_SEPARATE_COMPILATION=0 to disable.
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Relaxed strides checking: `strides[dim]` is (mostly) ignored when
# `shape[dim] == 1` while setting array flags.  Defaults to on; set
# NPY_RELAXED_STRIDES_CHECKING=0 to disable.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils forces
# config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
    """Run each expensive configuration check at most once.

    The first call of a given check performs the real work and stores a
    pickled snapshot of the result; every later call returns a deep copy
    unpickled from that snapshot, so callers may mutate the returned value
    freely without corrupting the cache.
    """

    def __init__(self):
        # Pickled results, or None while the corresponding check has not run.
        self._check_types = None
        self._check_ieee_macros = None
        self._check_complex = None

    def check_types(self, *a, **kw):
        cached = self._check_types
        if cached is not None:
            return copy.deepcopy(pickle.loads(cached))
        out = check_types(*a, **kw)
        self._check_types = pickle.dumps(out)
        return out

    def check_ieee_macros(self, *a, **kw):
        cached = self._check_ieee_macros
        if cached is not None:
            return copy.deepcopy(pickle.loads(cached))
        out = check_ieee_macros(*a, **kw)
        self._check_ieee_macros = pickle.dumps(out)
        return out

    def check_complex(self, *a, **kw):
        cached = self._check_complex
        if cached is not None:
            return copy.deepcopy(pickle.loads(cached))
        out = check_complex(*a, **kw)
        self._check_complex = pickle.dumps(out)
        return out
# Default assumption about Py_UNICODE width.
# NOTE(review): generate_config_h assigns a *local* variable of the same name
# when it probes Py_UNICODE_WIDE, so this module-level flag is never actually
# updated -- confirm whether that is intended.
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
    """Return the directory that contains the libpython* library."""
    on_windows = sys.platform == 'win32'
    return os.path.join(sys.prefix, "libs") if on_windows \
        else get_config_var('LIBDIR')
def is_npy_no_signal():
    """Tell whether the NPY_NO_SIGNAL symbol must be defined in the
    configuration header (signal handling is not usable on win32)."""
    unsupported_platforms = ('win32',)
    return sys.platform in unsupported_platforms
def is_npy_no_smp():
    """Tell whether the NPY_NO_SMP symbol must be defined in the public
    header, i.e. whether SMP support should be disabled.

    Controlled solely by the NPY_NOSMP environment variable.  A fancier
    check (e.g. only enabling threads on multi-CPU machines) is possible,
    but threaded code helps even on one CPU by keeping long computations
    from blocking everything.
    """
    return os.environ.get('NPY_NOSMP') is not None
def win32_checks(deflist):
    """Append Windows-specific preprocessor defines to *deflist* in place."""
    from numpy.distutils.misc_util import get_build_architecture
    arch = get_build_architecture()
    print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
          (arch, os.name, sys.platform))
    if arch == 'AMD64':
        # Distutils hack on AMD64 on windows
        deflist.append('DISTUTILS_USE_SDK')
    if arch in ("Intel", "AMD64"):
        # Force the long double format string to 'g' rather than 'Lg': the
        # MS runtime does not support long double > sizeof(double).
        deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
    """Probe the toolchain for math functions, headers and intrinsics.

    Appends ``(define_name, 1)`` tuples to *moredefs* (mutated in place) for
    every capability found.  *config* is a distutils config command object and
    *mathlibs* the list of libraries to link against (from check_mathlib).
    Raises SystemError when one of the MANDATORY_FUNCS is unavailable.
    """
    def check_func(func_name):
        # Single-function probe: must both declare and link.
        return config.check_func(func_name, libraries=mathlibs,
                                 decl=True, call=True)

    def check_funcs_once(funcs_name):
        # Probe the whole list in one compile; records all defines on success.
        decl = dict([(f, True) for f in funcs_name])
        st = config.check_funcs_once(funcs_name, libraries=mathlibs,
                                     decl=decl, call=decl)
        if st:
            moredefs.extend([(fname2def(f), 1) for f in funcs_name])
        return st

    def check_funcs(funcs_name):
        # Use check_funcs_once first, and if it does not work, test func per
        # func. Return success only if all the functions are available
        if not check_funcs_once(funcs_name):
            # Global check failed, check func per func
            for f in funcs_name:
                if check_func(f):
                    moredefs.append((fname2def(f), 1))
            return 0
        else:
            return 1

    #use_msvc = config.check_decl("_MSC_VER")

    if not check_funcs_once(MANDATORY_FUNCS):
        raise SystemError("One of the required function to build numpy is not"
                          " available (the list is %s)." % str(MANDATORY_FUNCS))

    # Standard functions which may not be available and for which we have a
    # replacement implementation. Note that some of these are C99 functions.

    # XXX: hack to circumvent cpp pollution from python: python put its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own test are correct
    for f in OPTIONAL_STDFUNCS_MAYBE:
        if config.check_decl(fname2def(f),
                             headers=["Python.h", "math.h"]):
            OPTIONAL_STDFUNCS.remove(f)

    check_funcs(OPTIONAL_STDFUNCS)

    for h in OPTIONAL_HEADERS:
        # Empty function name: only the header inclusion is compiled.
        if config.check_func("", decl=False, call=False, headers=[h]):
            moredefs.append((fname2def(h).replace(".", "_"), 1))

    for tup in OPTIONAL_INTRINSICS:
        headers = None
        if len(tup) == 2:
            f, args = tup
        else:
            # Three-element tuples also name the header the intrinsic needs.
            f, args, headers = tup[0], tup[1], [tup[2]]
        if config.check_func(f, decl=False, call=True, call_args=args,
                             headers=headers):
            moredefs.append((fname2def(f), 1))

    for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
        if config.check_gcc_function_attribute(dec, fn):
            moredefs.append((fname2def(fn), 1))

    for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
        if config.check_gcc_variable_attribute(fn):
            # Sanitize names like "__thread" / "aligned(x)" into macro form.
            m = fn.replace("(", "_").replace(")", "_")
            moredefs.append((fname2def(m), 1))

    # C99 functions: float and long double versions
    check_funcs(C99_FUNCS_SINGLE)
    check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
    """Check for C99 complex support: header, types and functions.

    Returns ``(priv, pub)``, two lists of ``(define, value)`` tuples destined
    for the private config.h and the public _numpyconfig.h respectively.
    """
    priv = []
    pub = []

    try:
        if os.uname()[0] == "Interix":
            warnings.warn("Disabling broken complex support. See #1365")
            return priv, pub
    except:
        # os.uname not available on all platforms. blanket except ugly but safe
        pass

    # Check for complex support
    st = config.check_header('complex.h')
    if st:
        priv.append(('HAVE_COMPLEX_H', 1))
        pub.append(('NPY_USE_C99_COMPLEX', 1))

        for t in C99_COMPLEX_TYPES:
            st = config.check_type(t, headers=["complex.h"])
            if st:
                pub.append(('NPY_HAVE_%s' % type2def(t), 1))

        def check_prec(prec):
            # Probe every C99 complex function at one precision suffix
            # ('', 'f' or 'l'); try them all at once, then one by one.
            flist = [f + prec for f in C99_COMPLEX_FUNCS]
            decl = dict([(f, True) for f in flist])
            if not config.check_funcs_once(flist, call=decl, decl=decl,
                                           libraries=mathlibs):
                for f in flist:
                    if config.check_func(f, call=True, decl=True,
                                         libraries=mathlibs):
                        priv.append((fname2def(f), 1))
            else:
                priv.extend([(fname2def(f), 1) for f in flist])

        check_prec('')
        check_prec('f')
        check_prec('l')

    return priv, pub
def check_ieee_macros(config):
    """Check how isnan/isinf/signbit/isfinite are provided by the platform.

    Returns ``(priv, pub)``: define lists for the private config.h and the
    public _numpyconfig.h.  Entries here are plain define names (strings),
    not ``(name, value)`` tuples.
    """
    priv = []
    pub = []

    # Macros that were NOT already declared by Python's own config and thus
    # still need to be checked directly.
    macros = []

    def _add_decl(f):
        priv.append(fname2def("decl_%s" % f))
        pub.append('NPY_%s' % fname2def("decl_%s" % f))

    # XXX: hack to circumvent cpp pollution from python: python put its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own test are correct
    _macros = ["isnan", "isinf", "signbit", "isfinite"]
    for f in _macros:
        py_symbol = fname2def("decl_%s" % f)
        already_declared = config.check_decl(py_symbol,
                                             headers=["Python.h", "math.h"])
        if already_declared:
            # Trust Python's result only when its macro actually evaluates
            # to true.
            if config.check_macro_true(py_symbol,
                                       headers=["Python.h", "math.h"]):
                pub.append('NPY_%s' % fname2def("decl_%s" % f))
        else:
            macros.append(f)
    # Normally, isnan and isinf are macro (C99), but some platforms only have
    # func, or both func and macro version. Check for macro only, and define
    # replacement ones if not found.
    # Note: including Python.h is necessary because it modifies some math.h
    # definitions
    for f in macros:
        st = config.check_decl(f, headers=["Python.h", "math.h"])
        if st:
            _add_decl(f)

    return priv, pub
def check_types(config_cmd, ext, build_dir):
    """Determine the sizes of the basic C and Python types.

    Returns ``(private_defines, public_defines)``: lists of
    ``(symbol, value)`` pairs for config.h and _numpyconfig.h.  Raises
    SystemError when a size cannot be determined and RuntimeError for
    configurations numpy does not support (missing CHAR_BIT).
    """
    private_defines = []
    public_defines = []

    # Expected size (in number of bytes) for each type. This is an
    # optimization: those are only hints, and an exhaustive search for the size
    # is done if the hints are wrong.
    expected = {}
    expected['short'] = [2]
    expected['int'] = [4]
    expected['long'] = [8, 4]
    expected['float'] = [4]
    expected['double'] = [8]
    expected['long double'] = [16, 12, 8]
    expected['Py_intptr_t'] = [8, 4]
    expected['PY_LONG_LONG'] = [8]
    expected['long long'] = [8]
    expected['off_t'] = [8, 4]

    # Check we have the python header (-dev* packages on Linux)
    result = config_cmd.check_header('Python.h')
    if not result:
        raise SystemError(
                "Cannot compile 'Python.h'. Perhaps you need to "\
                "install python-dev|python-devel.")
    res = config_cmd.check_header("endian.h")
    if res:
        private_defines.append(('HAVE_ENDIAN_H', 1))
        public_defines.append(('NPY_HAVE_ENDIAN_H', 1))

    # Check basic types sizes
    for type in ('short', 'int', 'long'):
        # Reuse Python's own SIZEOF_* macro when pyconfig.h already has it.
        res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
        if res:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
        else:
            res = config_cmd.check_type_size(type, expected=expected[type])
            if res >= 0:
                public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            else:
                raise SystemError("Checking sizeof (%s) failed !" % type)

    for type in ('float', 'double', 'long double'):
        already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
                                                 headers=["Python.h"])
        res = config_cmd.check_type_size(type, expected=expected[type])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
            if not already_declared and not type == 'long double':
                private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

        # Compute size of corresponding complex type: used to check that our
        # definition is binary compatible with C99 complex type (check done at
        # build time in npy_common.h)
        complex_def = "struct {%s __x; %s __y;}" % (type, type)
        res = config_cmd.check_type_size(complex_def,
                                         expected=[2 * x for x in expected[type]])
        if res >= 0:
            public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % complex_def)

    for type in ('Py_intptr_t', 'off_t'):
        res = config_cmd.check_type_size(type, headers=["Python.h"],
                                         library_dirs=[pythonlib_dir()],
                                         expected=expected[type])
        if res >= 0:
            private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % type)

    # We check declaration AND type because that's how distutils does it.
    if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
        res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
                                         library_dirs=[pythonlib_dir()],
                                         expected=expected['PY_LONG_LONG'])
        if res >= 0:
            private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')

        res = config_cmd.check_type_size('long long',
                                         expected=expected['long long'])
        if res >= 0:
            #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
            public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
        else:
            raise SystemError("Checking sizeof (%s) failed !" % 'long long')

    if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
        raise RuntimeError(
                "Config wo CHAR_BIT is not supported"\
                ", please contact the maintainers")

    return private_defines, public_defines
def check_mathlib(config_cmd):
    """Return the list of libraries needed to link the C math functions.

    Tries no library, then -lm, then -lcpml, unless the MATHLIB environment
    variable names a comma-separated candidate list to try first.  Raises
    EnvironmentError when no candidate can link ``exp``.
    """
    candidates = [[], ['m'], ['cpml']]
    env_mathlib = os.environ.get('MATHLIB')
    if env_mathlib:
        candidates.insert(0, env_mathlib.split(','))
    for libs in candidates:
        if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
            return libs
    raise EnvironmentError("math library missing; rerun "
                           "setup.py after setting the "
                           "MATHLIB env variable")
def visibility_define(config):
    """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
    string)."""
    supports_hidden = config.check_compiler_gcc4()
    return '__attribute__((visibility("hidden")))' if supports_hidden else ''
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration for the numpy.core package.

    Sets up generation of config.h/_numpyconfig.h and the API headers, then
    declares the npymath/npysort libraries and the multiarray/umath (plus
    test) extension modules.  Returns the populated Configuration object.
    """
    from numpy.distutils.misc_util import Configuration, dot_join
    from numpy.distutils.system_info import get_info, default_lib_dirs

    config = Configuration('core', parent_package, top_path)
    local_dir = config.local_path
    codegen_dir = join(local_dir, 'code_generators')

    # In released versions a C API version mismatch is an error, not a warning.
    if is_released(config):
        warnings.simplefilter('error', MismatchCAPIWarning)

    # Check whether we have a mismatch between the set C API VERSION and the
    # actual C API VERSION
    check_api_version(C_API_VERSION, codegen_dir)

    generate_umath_py = join(codegen_dir, 'generate_umath.py')
    n = dot_join(config.name, 'generate_umath')
    generate_umath = imp.load_module('_'.join(n.split('.')),
                                     open(generate_umath_py, 'U'), generate_umath_py,
                                     ('.py', 'U', 1))

    header_dir = 'include/numpy'  # this is relative to config.path_in_package

    # Shared memoizer so config.h and _numpyconfig.h reuse the same check
    # results (see CallOnceOnly above).
    cocache = CallOnceOnly()

    def generate_config_h(ext, build_dir):
        # Build-time callback: (re)generate the private config.h header.
        target = join(build_dir, header_dir, 'config.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)

        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)

            # Check math library and C99 math funcs availability
            mathlibs = check_mathlib(config_cmd)
            moredefs.append(('MATHLIB', ','.join(mathlibs)))

            check_math_capabilities(config_cmd, moredefs, mathlibs)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])

            # Signal check
            if is_npy_no_signal():
                moredefs.append('__NPY_PRIVATE_NO_SIGNAL')

            # Windows checks
            if sys.platform=='win32' or os.name=='nt':
                win32_checks(moredefs)

            # C99 restrict keyword
            moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))

            # Inline check
            inline = config_cmd.check_inline()

            # Check whether we need our own wide character support
            # NOTE(review): this rebinds a *local* PYTHON_HAS_UNICODE_WIDE,
            # not the module-level flag of the same name -- confirm intended.
            if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
                PYTHON_HAS_UNICODE_WIDE = True
            else:
                PYTHON_HAS_UNICODE_WIDE = False

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Get long double representation
            if sys.platform != 'darwin':
                rep = check_long_double_representation(config_cmd)
                if rep in ['INTEL_EXTENDED_12_BYTES_LE',
                           'INTEL_EXTENDED_16_BYTES_LE',
                           'MOTOROLA_EXTENDED_12_BYTES_BE',
                           'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
                           'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
                           'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
                    moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
                else:
                    raise ValueError("Unrecognized long double format: %s" % rep)

            # Py3K check
            if sys.version_info[0] == 3:
                moredefs.append(('NPY_PY3K', 1))

            # Generate the config.h file from moredefs
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    # Plain string entries become valueless #defines.
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # define inline to our keyword, or nothing
            target_f.write('#ifndef __cplusplus\n')
            if inline == 'inline':
                target_f.write('/* #undef inline */\n')
            else:
                target_f.write('#define inline %s\n' % inline)
            target_f.write('#endif\n')

            # add the guard to make sure config.h is never included directly,
            # but always through npy_config.h
            target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")

            target_f.close()
            # Dump the generated header to stdout for the build log.
            print('File:', target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        else:
            # Header up to date: recover MATHLIB from the existing file so
            # the extension still links against the right libraries.
            mathlibs = []
            target_f = open(target)
            for line in target_f:
                s = '#define MATHLIB'
                if line.startswith(s):
                    value = line[len(s):].strip()
                    if value:
                        mathlibs.extend(value.split(','))
            target_f.close()

        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attributes (and none is
        # needed).
        if hasattr(ext, 'libraries'):
            ext.libraries.extend(mathlibs)

        incl_dir = os.path.dirname(target)
        if incl_dir not in config.numpy_include_dirs:
            config.numpy_include_dirs.append(incl_dir)

        return target

    def generate_numpyconfig_h(ext, build_dir):
        """Depends on config.h: generate_config_h has to be called before !"""
        # put private include directory in build_dir on search path
        # allows using code generation in headers headers
        config.add_include_dirs(join(build_dir, "src", "private"))

        target = join(build_dir, header_dir, '_numpyconfig.h')
        d = os.path.dirname(target)
        if not os.path.exists(d):
            os.makedirs(d)
        if newer(__file__, target):
            config_cmd = config.get_config_cmd()
            log.info('Generating %s', target)

            # Check sizeof
            ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)

            if is_npy_no_signal():
                moredefs.append(('NPY_NO_SIGNAL', 1))

            if is_npy_no_smp():
                moredefs.append(('NPY_NO_SMP', 1))
            else:
                moredefs.append(('NPY_NO_SMP', 0))

            mathlibs = check_mathlib(config_cmd)
            moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
            moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])

            if ENABLE_SEPARATE_COMPILATION:
                moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))

            if NPY_RELAXED_STRIDES_CHECKING:
                moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))

            # Check wether we can use inttypes (C99) formats
            if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
                moredefs.append(('NPY_USE_C99_FORMATS', 1))

            # visibility check
            hidden_visibility = visibility_define(config_cmd)
            moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))

            # Add the C API/ABI versions
            moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
            moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))

            # Add moredefs to header
            target_f = open(target, 'w')
            for d in moredefs:
                if isinstance(d, str):
                    target_f.write('#define %s\n' % (d))
                else:
                    target_f.write('#define %s %s\n' % (d[0], d[1]))

            # Define __STDC_FORMAT_MACROS
            target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
            target_f.close()

            # Dump the numpyconfig.h header to stdout
            print('File: %s' % target)
            target_f = open(target)
            print(target_f.read())
            target_f.close()
            print('EOF')
        config.add_data_files((header_dir, target))
        return target

    def generate_api_func(module_name):
        # Build a build-time callback that runs code_generators/<module>.py
        # and registers the generated headers as data files.
        def generate_api(ext, build_dir):
            script = join(codegen_dir, module_name + '.py')
            sys.path.insert(0, codegen_dir)
            try:
                m = __import__(module_name)
                log.info('executing %s', script)
                h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
            finally:
                del sys.path[0]

            config.add_data_files((header_dir, h_file),
                                  (header_dir, doc_file))
            return (h_file,)
        return generate_api

    generate_numpy_api = generate_api_func('generate_numpy_api')
    generate_ufunc_api = generate_api_func('generate_ufunc_api')

    config.add_include_dirs(join(local_dir, "src", "private"))
    config.add_include_dirs(join(local_dir, "src"))
    config.add_include_dirs(join(local_dir))

    config.add_data_files('include/numpy/*.h')
    config.add_include_dirs(join('src', 'npymath'))
    config.add_include_dirs(join('src', 'multiarray'))
    config.add_include_dirs(join('src', 'umath'))
    config.add_include_dirs(join('src', 'npysort'))

    config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
    config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
    config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
    config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])

    config.numpy_include_dirs.extend(config.paths('include'))

    # Dependencies shared by every extension declared below.
    deps = [join('src', 'npymath', '_signbit.c'),
            join('include', 'numpy', '*object.h'),
            join(codegen_dir, 'genapi.py'),
            ]

    #######################################################################
    #                            dummy module                             #
    #######################################################################

    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clib are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.

    config.add_extension('_dummy',
                         sources=[join('src', 'dummymodule.c'),
                                  generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api]
                         )

    #######################################################################
    #                          npymath library                            #
    #######################################################################

    subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])

    def get_mathlib_info(*args):
        # Another ugly hack: the mathlib info is known once build_src is run,
        # but we cannot use add_installed_pkg_config here either, so we only
        # update the substition dictionary during npymath build
        config_cmd = config.get_config_cmd()

        # Check that the toolchain works, to fail early if it doesn't
        # (avoid late errors with MATHLIB which are confusing if the
        # compiler does not work).
        st = config_cmd.try_link('int main(void) { return 0;}')
        if not st:
            raise RuntimeError("Broken toolchain: cannot link a simple C program")
        mlibs = check_mathlib(config_cmd)

        posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
        msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
        subst_dict["posix_mathlib"] = posix_mlib
        subst_dict["msvc_mathlib"] = msvc_mlib

    npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
                       join('src', 'npymath', 'ieee754.c.src'),
                       join('src', 'npymath', 'npy_math_complex.c.src'),
                       join('src', 'npymath', 'halffloat.c')]
    config.add_installed_library('npymath',
                                 sources=npymath_sources + [get_mathlib_info],
                                 install_dir='lib')
    config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
                              subst_dict)
    config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
                              subst_dict)

    #######################################################################
    #                          npysort library                            #
    #######################################################################

    # This library is created for the build but it is not installed
    npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
                     join('src', 'npysort', 'mergesort.c.src'),
                     join('src', 'npysort', 'heapsort.c.src'),
                     join('src', 'private', 'npy_partition.h.src'),
                     join('src', 'npysort', 'selection.c.src'),
                     join('src', 'private', 'npy_binsearch.h.src'),
                     join('src', 'npysort', 'binsearch.c.src'),
                     ]
    config.add_library('npysort',
                       sources=npysort_sources,
                       include_dirs=[])

    #######################################################################
    #                         multiarray module                           #
    #######################################################################

    # Multiarray version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_multiarray_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'multiarray')
        sources = [join(local_dir, subpath, 'scalartypes.c.src'),
                   join(local_dir, subpath, 'arraytypes.c.src'),
                   join(local_dir, subpath, 'nditer_templ.c.src'),
                   join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
                   join(local_dir, subpath, 'einsum.c.src'),
                   join(local_dir, 'src', 'private', 'templ_common.h.src')]

        # numpy.distutils generate .c from .c.src in weird directories, we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    multiarray_deps = [
        join('src', 'multiarray', 'arrayobject.h'),
        join('src', 'multiarray', 'arraytypes.h'),
        join('src', 'multiarray', 'array_assign.h'),
        join('src', 'multiarray', 'buffer.h'),
        join('src', 'multiarray', 'calculation.h'),
        join('src', 'multiarray', 'common.h'),
        join('src', 'multiarray', 'convert_datatype.h'),
        join('src', 'multiarray', 'convert.h'),
        join('src', 'multiarray', 'conversion_utils.h'),
        join('src', 'multiarray', 'ctors.h'),
        join('src', 'multiarray', 'descriptor.h'),
        join('src', 'multiarray', 'getset.h'),
        join('src', 'multiarray', 'hashdescr.h'),
        join('src', 'multiarray', 'iterators.h'),
        join('src', 'multiarray', 'mapping.h'),
        join('src', 'multiarray', 'methods.h'),
        join('src', 'multiarray', 'multiarraymodule.h'),
        join('src', 'multiarray', 'nditer_impl.h'),
        join('src', 'multiarray', 'numpymemoryview.h'),
        join('src', 'multiarray', 'number.h'),
        join('src', 'multiarray', 'numpyos.h'),
        join('src', 'multiarray', 'refcount.h'),
        join('src', 'multiarray', 'scalartypes.h'),
        join('src', 'multiarray', 'sequence.h'),
        join('src', 'multiarray', 'shape.h'),
        join('src', 'multiarray', 'ucsnarrow.h'),
        join('src', 'multiarray', 'usertypes.h'),
        join('src', 'multiarray', 'vdot.h'),
        join('src', 'private', 'templ_common.h.src'),
        join('src', 'private', 'lowlevel_strided_loops.h'),
        join('include', 'numpy', 'arrayobject.h'),
        join('include', 'numpy', '_neighborhood_iterator_imp.h'),
        join('include', 'numpy', 'npy_endian.h'),
        join('include', 'numpy', 'arrayscalars.h'),
        join('include', 'numpy', 'noprefix.h'),
        join('include', 'numpy', 'npy_interrupt.h'),
        join('include', 'numpy', 'npy_3kcompat.h'),
        join('include', 'numpy', 'npy_math.h'),
        join('include', 'numpy', 'halffloat.h'),
        join('include', 'numpy', 'npy_common.h'),
        join('include', 'numpy', 'npy_os.h'),
        join('include', 'numpy', 'utils.h'),
        join('include', 'numpy', 'ndarrayobject.h'),
        join('include', 'numpy', 'npy_cpu.h'),
        join('include', 'numpy', 'numpyconfig.h'),
        join('include', 'numpy', 'ndarraytypes.h'),
        join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
        join('include', 'numpy', '_numpyconfig.h.in'),
        # add library sources as distuils does not consider libraries
        # dependencies
        ] + npysort_sources + npymath_sources

    multiarray_src = [
        join('src', 'multiarray', 'alloc.c'),
        join('src', 'multiarray', 'arrayobject.c'),
        join('src', 'multiarray', 'arraytypes.c.src'),
        join('src', 'multiarray', 'array_assign.c'),
        join('src', 'multiarray', 'array_assign_scalar.c'),
        join('src', 'multiarray', 'array_assign_array.c'),
        join('src', 'multiarray', 'buffer.c'),
        join('src', 'multiarray', 'calculation.c'),
        join('src', 'multiarray', 'compiled_base.c'),
        join('src', 'multiarray', 'common.c'),
        join('src', 'multiarray', 'convert.c'),
        join('src', 'multiarray', 'convert_datatype.c'),
        join('src', 'multiarray', 'conversion_utils.c'),
        join('src', 'multiarray', 'ctors.c'),
        join('src', 'multiarray', 'datetime.c'),
        join('src', 'multiarray', 'datetime_strings.c'),
        join('src', 'multiarray', 'datetime_busday.c'),
        join('src', 'multiarray', 'datetime_busdaycal.c'),
        join('src', 'multiarray', 'descriptor.c'),
        join('src', 'multiarray', 'dtype_transfer.c'),
        join('src', 'multiarray', 'einsum.c.src'),
        join('src', 'multiarray', 'flagsobject.c'),
        join('src', 'multiarray', 'getset.c'),
        join('src', 'multiarray', 'hashdescr.c'),
        join('src', 'multiarray', 'item_selection.c'),
        join('src', 'multiarray', 'iterators.c'),
        join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
        join('src', 'multiarray', 'mapping.c'),
        join('src', 'multiarray', 'methods.c'),
        join('src', 'multiarray', 'multiarraymodule.c'),
        join('src', 'multiarray', 'nditer_templ.c.src'),
        join('src', 'multiarray', 'nditer_api.c'),
        join('src', 'multiarray', 'nditer_constr.c'),
        join('src', 'multiarray', 'nditer_pywrap.c'),
        join('src', 'multiarray', 'number.c'),
        join('src', 'multiarray', 'numpymemoryview.c'),
        join('src', 'multiarray', 'numpyos.c'),
        join('src', 'multiarray', 'refcount.c'),
        join('src', 'multiarray', 'sequence.c'),
        join('src', 'multiarray', 'shape.c'),
        join('src', 'multiarray', 'scalarapi.c'),
        join('src', 'multiarray', 'scalartypes.c.src'),
        join('src', 'multiarray', 'usertypes.c'),
        join('src', 'multiarray', 'ucsnarrow.c'),
        join('src', 'multiarray', 'vdot.c'),
        join('src', 'private', 'templ_common.h.src'),
        ]

    # Use optimized BLAS (cblas) for dot products when available.
    blas_info = get_info('blas_opt', 0)
    if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
        extra_info = blas_info
        multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
                               join('src', 'multiarray', 'python_xerbla.c'),
                               ])
    else:
        extra_info = {}

    if not ENABLE_SEPARATE_COMPILATION:
        # Single translation unit build: compile everything via the
        # *_onefile.c aggregator and keep the real sources as dependencies.
        multiarray_deps.extend(multiarray_src)
        multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
        multiarray_src.append(generate_multiarray_templated_sources)

    config.add_extension('multiarray',
                         sources=multiarray_src +
                                 [generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_numpy_api,
                                  join(codegen_dir, 'generate_numpy_api.py'),
                                  join('*.py')],
                         depends=deps + multiarray_deps,
                         libraries=['npymath', 'npysort'],
                         extra_info=extra_info)

    #######################################################################
    #                           umath module                              #
    #######################################################################

    # umath version: this function is needed to build foo.c from foo.c.src
    # when foo.c is included in another file and as such not in the src
    # argument of build_ext command
    def generate_umath_templated_sources(ext, build_dir):
        from numpy.distutils.misc_util import get_cmd

        subpath = join('src', 'umath')
        sources = [
            join(local_dir, subpath, 'loops.h.src'),
            join(local_dir, subpath, 'loops.c.src'),
            join(local_dir, subpath, 'scalarmath.c.src'),
            join(local_dir, subpath, 'simd.inc.src')]

        # numpy.distutils generate .c from .c.src in weird directories, we have
        # to add them there as they depend on the build_dir
        config.add_include_dirs(join(build_dir, subpath))
        cmd = get_cmd('build_src')
        cmd.ensure_finalized()
        cmd.template_sources(sources, ext)

    def generate_umath_c(ext, build_dir):
        # Regenerate __umath_generated.c from generate_umath.py when stale.
        target = join(build_dir, header_dir, '__umath_generated.c')
        dir = os.path.dirname(target)
        if not os.path.exists(dir):
            os.makedirs(dir)
        script = generate_umath_py
        if newer(script, target):
            f = open(target, 'w')
            f.write(generate_umath.make_code(generate_umath.defdict,
                                             generate_umath.__file__))
            f.close()
        return []

    umath_src = [
        join('src', 'umath', 'umathmodule.c'),
        join('src', 'umath', 'reduction.c'),
        join('src', 'umath', 'funcs.inc.src'),
        join('src', 'umath', 'simd.inc.src'),
        join('src', 'umath', 'loops.h.src'),
        join('src', 'umath', 'loops.c.src'),
        join('src', 'umath', 'ufunc_object.c'),
        join('src', 'umath', 'scalarmath.c.src'),
        join('src', 'umath', 'ufunc_type_resolution.c')]

    umath_deps = [
        generate_umath_py,
        join('src', 'multiarray', 'common.h'),
        join('src', 'private', 'templ_common.h.src'),
        join('src', 'umath', 'simd.inc.src'),
        join(codegen_dir, 'generate_ufunc_api.py'),
        join('src', 'private', 'ufunc_override.h')] + npymath_sources

    if not ENABLE_SEPARATE_COMPILATION:
        # Same single-translation-unit trick as for multiarray above.
        umath_deps.extend(umath_src)
        umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
        umath_src.append(generate_umath_templated_sources)
        umath_src.append(join('src', 'umath', 'funcs.inc.src'))
        umath_src.append(join('src', 'umath', 'simd.inc.src'))

    config.add_extension('umath',
                         sources=umath_src +
                                 [generate_config_h,
                                  generate_numpyconfig_h,
                                  generate_umath_c,
                                  generate_ufunc_api],
                         depends=deps + umath_deps,
                         libraries=['npymath'],
                         )

    #######################################################################
    #                        umath_tests module                           #
    #######################################################################

    config.add_extension('umath_tests',
                         sources=[join('src', 'umath', 'umath_tests.c.src')])

    #######################################################################
    #                   custom rational dtype module                      #
    #######################################################################

    config.add_extension('test_rational',
                         sources=[join('src', 'umath', 'test_rational.c.src')])

    #######################################################################
    #                     struct_ufunc_test module                        #
    #######################################################################

    config.add_extension('struct_ufunc_test',
                         sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])

    #######################################################################
    #                     multiarray_tests module                         #
    #######################################################################

    config.add_extension('multiarray_tests',
                         sources=[join('src', 'multiarray', 'multiarray_tests.c.src')])

    #######################################################################
    #                     operand_flag_tests module                       #
    #######################################################################

    config.add_extension('operand_flag_tests',
                         sources=[join('src', 'umath', 'operand_flag_tests.c.src')])

    config.add_data_dir('tests')
    config.add_data_dir('tests/data')

    config.make_svn_version_py()

    return config
# Allow building/installing numpy.core standalone.
if __name__=='__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
| |
import os
import sys
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as offline
import seaborn as sns
import statsmodels.api as sm
from sklearn.decomposition import PCA
# --- Analysis configuration flags -------------------------------------------
TEST_METHOD: bool = False  # run on synthetic data instead of malaria.csv
TEST_FOUR_DAYS: bool = False  # cannot be True if TEST_METHOD is True
INTERPOLATE_DATA: bool = True  # cannot be True if TEST_METHOD is True
TIME_WINDOW: int = 4  # sliding-window width, in days
USE_FOURIER_TRANSFORM: bool = False  # FFT instead of cosine-series fit
FOURIER_COEF_NUM: int = 15  # number of coefficients kept per window
HODRICK_PRESCOTT_LAMBDA: int = 15000  # HP-filter smoothing parameter

# Fail fast on unsupported interpreters. Use a plain check instead of
# `assert` so the guard still runs under `python -O` (which strips asserts).
if sys.version_info.major != 3 or sys.version_info.minor < 6:
    raise RuntimeError("This code requires Python 3.6+.")
def interpolate_data(df, freq: str = "30min"):
    """Interpolate each subject's series to a regular grid so FFT works.

    Parameters
    ----------
    df: pandas DataFrame
        Long-format data with at least 'timepoint', 'temperature' and
        'id' columns.
    freq: str, optional
        Frequency of the regular grid (default: every 30 minutes).

    Returns
    -------
    An interpolated long DataFrame.
    """
    # Empty seed frame keeps the original column order in the result.
    pieces = [pd.DataFrame(columns=df.columns)]
    # Derive subject ids from the input instead of relying on the
    # module-level `ids` global (the original silently depended on it).
    for uuid in df["id"].unique():
        observed = df[df["id"] == uuid][["timepoint", "temperature"]]
        grid = pd.date_range(
            start=observed.timepoint.min(),
            end=observed.timepoint.max(),
            freq=freq
        )
        resampled = pd.DataFrame(dict(timepoint=grid, id=uuid))
        resampled = resampled.join(observed.set_index("timepoint"),
                                   on="timepoint", how="left")
        # Interpolate only the numeric column; running interpolate() on the
        # whole frame touches the object-dtype 'id' column, which newer
        # pandas versions reject.
        resampled["temperature"] = resampled["temperature"].interpolate(method="cubic")
        pieces.append(resampled)
    # One concat at the end instead of re-concatenating inside the loop
    # (the original was quadratic in the number of subjects).
    return pd.concat(pieces, ignore_index=True)
def hodrick_prescott(df: pd.DataFrame, lamb: int=1600) -> pd.DataFrame:
    """Estimate the long-run trend with a Hodrick-Prescott filter.

    Parameters
    ----------
    df: pandas DataFrame
        Temperature observations with 'timepoint' and 'temperature' columns.
    lamb: float, optional
        Hodrick-Prescott smoothing parameter; the larger it is, the
        smoother the estimated trend.

    Returns
    -------
    ``df`` sorted by timepoint, with new 'trend' and 'detrended'
    (temperature minus trend) columns.
    """
    ordered = df.sort_values("timepoint")
    cycle_and_trend = sm.tsa.filters.hpfilter(ordered.temperature, lamb=lamb)
    ordered["trend"] = cycle_and_trend[1]
    ordered["detrended"] = ordered["temperature"] - ordered["trend"]
    return ordered
def fourier_transform(dfs, t: int=2, n: int=10, detrend: bool=True,
                      uuids=None) -> pd.DataFrame:
    """Run an FFT over sliding day-windows of each subject's series.

    Parameters
    ----------
    dfs: list of pandas DataFrames
        Outputs of `hodrick_prescott`, one per subject.
    t: int, optional
        Width of the sliding window, in days.
    n: int, optional
        Number of FFT coefficients kept per window.
    detrend: bool, optional
        Whether to use de-trended data or the original temperature data.
    uuids: sequence of str, optional
        Identifiers matching ``dfs`` one-to-one. Defaults to the
        module-level ``ids`` for backward compatibility.

    Returns
    -------
    DataFrame of |FFT| coefficients, one row per (subject, window).
    """
    if uuids is None:
        # Backward compatible fallback: the original silently depended on
        # the module-level `ids` global.
        uuids = ids
    column = "detrended" if detrend else "temperature"
    M_matrix = pd.DataFrame(columns=[f"c{i+1}" for i in range(n)])
    for df, uuid in zip(dfs, uuids):
        df = df.loc[:, ["timepoint", column]]
        df["measure_date"] = df.timepoint.apply(lambda x: x.date())
        dates = df["measure_date"].unique()
        windows = [dates[i:i + t] for i in range(len(dates) - t + 1)]
        for window in windows:
            data = df.loc[df["measure_date"].isin(window)]
            date_range = f"{str(window.min())[-5:]}_{str(window.max())[-5:]}"
            # Magnitude spectrum only; the phase is discarded.
            M_matrix.loc[f"{uuid}_{date_range}"] = abs(np.fft.fft(a=data[column], n=n))
    return M_matrix
def fourier_series(dfs, t: int=2, n: int=10, detrend: bool=True, uuids=None):
    """Fit a Fourier cosine series over sliding day-windows of each series.

    Parameters
    ----------
    dfs: list of pandas DataFrames
        Outputs of `hodrick_prescott`, one per subject.
    t: int, optional
        Width of the sliding window, in days.
    n: int, optional
        Number of cosine coefficients computed per window.
    detrend: bool, optional
        Whether to use de-trended data or the original temperature data.
    uuids: sequence of str, optional
        Identifiers matching ``dfs`` one-to-one. Defaults to the
        module-level ``ids`` for backward compatibility.

    Returns
    -------
    S_matrices: long DataFrame with columns timepoint, id, fcs, and
        temperature / detrended.
    M_matrix: the matrix of cosine coefficients, one row per window.
    """
    def integrator(y, x):
        # Simpson-style quadrature over consecutive point triples
        # (assumes the middle point lies near the interval midpoint).
        total = 0
        for i in range(0, len(y) - 2, 2):
            a = x.iloc[i]
            b = x.iloc[i + 2]
            total += (b - a) / 6 * (y.iloc[i] + 4 * y.iloc[i + 1] + y.iloc[i + 2])
        return total

    def calc_series(n, x, y):
        # Expand y(x) into a mean level plus n cosine terms on [0, L].
        L = x.iloc[-1] - x.iloc[0]
        S = 1 / L * integrator(y, x)
        c = []
        for i in range(1, n + 1):
            p = y * np.cos(i * np.pi * x / L)
            q = np.cos(i * np.pi * x / L) ** 2
            c.append(integrator(p, x) / integrator(q, x))
            S += c[i - 1] * np.cos(i * np.pi * x / L)  # S has the same len as x
        return dict(S=S, c=c)

    if uuids is None:
        # Backward compatible fallback to the old module-level `ids` global.
        uuids = ids
    column = "detrended" if detrend else "temperature"
    pieces = []
    M_matrix = pd.DataFrame(columns=[f"c{i+1}" for i in range(n)])
    for df, uuid in zip(dfs, uuids):
        df = df.sort_values("timepoint")
        df["measure_date"] = df.timepoint.apply(lambda x: x.date())
        dates = df["measure_date"].unique()
        windows = [dates[i:i + t] for i in range(len(dates) - t + 1)]
        for window in windows:
            data = df.loc[df["measure_date"].isin(window)]
            date_range = f"{str(window.min())[-5:]}_{str(window.max())[-5:]}"
            x = data["timepoint"]
            x = (x - x.min()) / np.timedelta64(1, "h")  # elapsed hours
            y = data[column]
            series = calc_series(n, x, y)
            M_matrix.loc[f"{uuid}_{date_range}"] = series["c"]
            S_matrix = pd.DataFrame(dict(
                timepoint=data["timepoint"],
                fcs=series["S"],
                id=f"{uuid}_{date_range}"
            ))
            S_matrix[column] = y
            pieces.append(S_matrix)
    # DataFrame.append was removed in pandas 2.0; collect the per-window
    # frames and concatenate once instead.
    S_matrices = pd.concat(pieces) if pieces else pd.DataFrame()
    return S_matrices, M_matrix
def tsplot(df, uuid, trend=True, detrended=True, save_image=False):
    """Render a temperature time-series figure with plotly and return it.

    Parameters
    ----------
    df: pandas DataFrame
        Output of function `hodrick_prescott`.
    uuid: str
        Experiment and case identifier.
    trend: bool, optional
        Whether to overlay the Hodrick-Prescott trend line.
    detrended: bool, optional
        Whether to replace the raw trace with the de-trended one
        (plotted on a secondary y-axis).
    save_image: bool, optional
        Whether to download the figure as a static file.

    Returns
    -------
    The plotly figure dictionary that was rendered.
    """
    raw_trace = {
        'x': df['timepoint'],
        'y': df["temperature"],
        'name': "Temperature",
        'opacity': 0.7,
        'mode': 'lines+markers'
    }
    layout = {
        "title": uuid,
        'xaxis': {'title': 'Date'},
        'yaxis': {'title': "Temperature"}
    }
    fig = {'data': [raw_trace], 'layout': layout}

    if trend:
        trend_trace = {
            'x': df['timepoint'],
            'y': df["trend"],
            'name': "Hodrick-Prescott filter trend",
            'opacity': 0.7
        }
        fig["data"].append(trend_trace)

    if detrended:
        # Swap the first trace for the de-trended series on a right-hand axis.
        fig["data"][0] = {
            'x': df['timepoint'],
            'y': df["detrended"],
            'name': "De-trended",
            "yaxis": "y2",
            'opacity': 0.7
        }
        layout["yaxis2"] = {
            "title": "Temperature",
            "overlaying": "y",
            "side": "right"
        }

    render_kwargs = dict(filename=f"{uuid}", show_link=False)
    if save_image:
        render_kwargs["image"] = "png"
    offline.iplot(fig, **render_kwargs)
    return fig
# ---------------------------------------------------------------------------
# Script body: load (or synthesize) the temperature data, de-trend it, fit a
# Fourier representation per sliding window, then project onto two PCs.
# ---------------------------------------------------------------------------

# Read temperature data:
if TEST_METHOD:
    # Synthetic sanity-check mode: three fake subjects, 120 hourly points of
    # cos(t)*t plus Gaussian noise each.
    import matplotlib.pyplot as plt
    np.random.seed(0)
    df = pd.DataFrame()
    for uuid in ["group1_", "group2_", "group3_"]:
        x = pd.date_range('1/1/2018', periods=120, freq='H')
        xe = (x - x.min()) / np.timedelta64(1, "h")
        noise = np.random.normal(0, 1, len(x))
        s = np.std(np.cos(xe) * xe) / np.random.normal()
        y = np.cos(xe) * xe + s * noise
        df = df.append(pd.DataFrame(
            dict(timepoint=x,
                 elapsed_time=xe,
                 temperature=y,
                 id=uuid
                 )))
        plt.plot(x, y)
else:
    df = pd.read_csv("malaria.csv")
    df["timepoint"] = pd.to_datetime(df["timepoint"])
    # Subject identifier is "<experiment>_<case>".
    df["id"] = df[["experiment", "case"]].apply("_".join, axis=1)
    df = df.loc[:, ["timepoint", "temperature", "id"]]
ids = df["id"].unique()
df.head()  # no-op outside a notebook; kept from the notebook original

if TEST_FOUR_DAYS:
    # Keep only each subject's first four days of observations.
    res = pd.DataFrame()
    for uuid in ids:
        x = df[df["id"] == uuid]
        first_four_days = x["timepoint"].min() + pd.DateOffset(3)
        x = x.loc[x.timepoint <= first_four_days]
        res = res.append(x)
    df = res

# Interpolate temperature data and find when the fevers happened:
if INTERPOLATE_DATA:
    df = interpolate_data(df)

# Calculate the trend using the Hodrick-Prescott filter:
trend_list = []
for uuid in ids:
    x = df[df["id"] == uuid][["timepoint", "temperature"]]
    trend_list.append(hodrick_prescott(x, lamb=HODRICK_PRESCOTT_LAMBDA))
# Plot a few hand-picked subjects for visual inspection.
figs = [tsplot(x, uuid, trend=True, detrended=False, save_image=False) for x, uuid in zip(trend_list, ids) if uuid in ["E30_RKy15", "E06_RIh16", "E07B_11C166"]]

# Check the de-trended data:
if USE_FOURIER_TRANSFORM:
    M_matrix = fourier_transform(trend_list, t=TIME_WINDOW, n=FOURIER_COEF_NUM, detrend=True)
else:
    S_matrix, M_matrix = fourier_series(trend_list, t=TIME_WINDOW, n=FOURIER_COEF_NUM, detrend=True)
# NOTE(review): S_matrix is only assigned on the `else` branch above, so the
# next line raises NameError when USE_FOURIER_TRANSFORM is True — confirm.
dfm = S_matrix.loc[S_matrix["id"].str.startswith("E30_RKy15")]
dfm = dfm.groupby("timepoint").mean().reset_index()
# Overlay the de-trended data and its cosine-series fit for one subject.
fig = {
    'data': [
        go.Scatter(
            x=pd.to_datetime(dfm['timepoint']),
            y=dfm['detrended'],
            name="Temperature",
            opacity=0.7
        ),
        go.Scatter(
            x=pd.to_datetime(dfm["timepoint"]),
            y=dfm["fcs"],
            name="Fourier cosine series",
            opacity=0.7
        )
    ],
    'layout': {
        'xaxis': {'title': 'Date'},
        'yaxis': {'title': "Temperature"},
        'title': "E30 RKy15"
    }
}
offline.iplot(fig, show_link=False, image="png")
M_matrix.head()

# Two-component PCA of the per-window coefficient matrix; the component
# rows are used as (PC1, PC2) coordinates for each window below.
pca = PCA(n_components=2, random_state=0)
c = pca.fit(M_matrix.T)
x, y = c.components_
print(f"Explained variance ratio: {c.explained_variance_ratio_}")
# Reconstruction loss of the rank-2 approximation.
projection = pca.inverse_transform(pca.transform(M_matrix.T))
loss = ((M_matrix.T - projection) ** 2).mean()
loss.head()
obs = M_matrix.index.tolist()
data = pd.DataFrame(dict(PC1=x, PC2=y), index=obs)
# Group label is the experiment prefix of each "<uuid>_<window>" row name.
data["group"] = [x.split("_")[0] for x in data.index]
# NOTE(review): `size=` was renamed to `height=` in newer seaborn — confirm
# the pinned seaborn version before upgrading.
fig = sns.lmplot(
    data=data, x="PC1", y="PC2", hue="group", size=10,
    fit_reg=False, scatter_kws={'alpha': 0.7}, markers=["o", "x", "s"]
)
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import hashlib
import logging
import random
import socket
import threading
import cherrypy
import safe
from flask import Flask, abort, redirect
from flask_login import UserMixin
from sqlalchemy import Column, Integer, Unicode
from werkzeug.security import generate_password_hash
from flexget.config_schema import register_config_key
from flexget.event import event
from flexget.manager import Base
from flexget.utils.database import with_session
from flexget.utils.tools import singleton
log = logging.getLogger('web_server')

# Route registered by the UI plugin as the landing page ('/' redirects here).
_home = None
# Mapping of URL path -> WSGI application mounted under that path.
_app_register = {}
_default_app = Flask(__name__)

# Shadows the `random` module with a cryptographically strong RNG so the
# key/token helpers below draw from SystemRandom.
random = random.SystemRandom()

# JSON schema for the 'web_server' config key: either a boolean toggle or an
# object giving the bind address and port.
web_config_schema = {
    'oneOf': [
        {'type': 'boolean'},
        {
            'type': 'object',
            'properties': {
                'bind': {'type': 'string', 'format': 'ipv4', 'default': '0.0.0.0'},
                'port': {'type': 'integer', 'default': 3539},
            },
            'additionalProperties': False
        }
    ]
}
def generate_key():
    """Generate an authentication key: SHA-224 hex digest of 128 random bits."""
    random_bits = str(random.getrandbits(128)).encode('utf-8')
    return hashlib.sha224(random_bits).hexdigest()
def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """
    Returns a securely generated random string.
    The default length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value. log_2((26+26+10)^12) =~ 71 bits.
    Taken from the django.utils.crypto module.
    """
    picks = [random.choice(allowed_chars) for _ in range(length)]
    return ''.join(picks)
@with_session
def get_secret(session=None):
    """ Generate a secret key for flask applications and store it in the database. """
    alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    secret = session.query(WebSecret).first()
    if not secret:
        # First run: create and persist a 50-character secret.
        secret = WebSecret(id=1, value=get_random_string(50, alphabet))
        session.add(secret)
        session.commit()
    return secret.value
class WeakPassword(Exception):
    """Raised when a supplied password fails the strength check."""

    def __init__(self, value, logger=None, **kwargs):
        """
        :param value: Human readable error message (coerced to str).
        :param logger: Logger to attach; defaults to this module's logger.
            Resolved at call time rather than class-definition time so the
            default does not capture module state during import.
        """
        super(WeakPassword, self).__init__()
        # Value is expected to be a string
        if not isinstance(value, str):
            value = str(value)
        self.value = value
        self.log = logger if logger is not None else logging.getLogger('web_server')
        self.kwargs = kwargs

    def __str__(self):
        # BUG FIX: the original returned str(self).encode('utf-8'), which
        # called __str__ recursively and raised RecursionError on any str().
        return self.value

    def __unicode__(self):
        # Python 2 compatibility shim (file uses builtins/future imports).
        return str(self.value)
class User(Base, UserMixin):
    """ User class available for flask apps to handle authentication using flask_login """
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(50), unique=True)
    # Per-user API token; regenerated via generate_token().
    token = Column(Unicode, default=generate_key)
    # Werkzeug password hash, set via change_password().
    password = Column(Unicode)

    def __repr__(self):
        return '<User %r>' % self.name

    def get_id(self):
        # flask_login identifies users by name rather than the integer id.
        return self.name
class WebSecret(Base):
    """ Store flask secret in the database """
    __tablename__ = 'secret'
    # Effectively a single-row table: get_secret() always creates id=1.
    id = Column(Unicode, primary_key=True)
    value = Column(Unicode)
@event('config.register')
def register_config():
    """Register the 'web_server' config key and its schema on config load."""
    register_config_key('web_server', web_config_schema)
def register_app(path, application):
    """Register a WSGI application to be mounted under *path*.

    :raises ValueError: if another application already claimed the path.
    """
    if path in _app_register:
        # BUG FIX: the original raised ValueError('path %s already registered')
        # without interpolating, so the message never contained the path.
        raise ValueError('path %s already registered' % path)
    _app_register[path] = application
def register_home(route):
    """Registers UI home page"""
    # Stored module-globally; start_page() redirects '/' to this route.
    global _home
    _home = route
@_default_app.route('/')
def start_page():
    """ Redirect user to registered UI home """
    if not _home:
        # No UI plugin registered a home page yet.
        abort(404)
    return redirect(_home)
@event('manager.daemon.started', -255)  # Low priority so plugins can register apps
@with_session
def setup_server(manager, session=None):
    """ Sets up and starts/restarts the web service. """
    # The web server only runs alongside the daemon.
    if not manager.is_daemon:
        return
    web_server_config = manager.config.get('web_server')
    if not web_server_config:
        return
    web_server = WebServer(
        bind=web_server_config['bind'],
        port=web_server_config['port'],
    )
    _default_app.secret_key = get_secret()

    # Warn (but keep serving) when no password has been set yet.
    user = get_user()
    if not user or not user.password:
        log.warning('No password set for web server, create one by using'
                    ' `flexget web passwd <password>`')

    # Restart case: stop a previous instance before starting a fresh one.
    if web_server.is_alive():
        web_server.stop()

    # Only start when at least one plugin registered an application.
    if _app_register:
        web_server.start()
@event('manager.shutdown')
def stop_server(manager):
    """Stop the web server when the daemon shuts down."""
    # (The original docstring was copy-pasted from setup_server and wrong.)
    if not manager.is_daemon:
        return
    # WebServer is a singleton, so this retrieves the running instance.
    web_server = WebServer()
    if web_server.is_alive():
        web_server.stop()
@singleton
class WebServer(threading.Thread):
    """Background thread serving the registered WSGI apps through CherryPy."""

    # We use a regular list for periodic jobs, so you must hold this lock while using it
    triggers_lock = threading.Lock()

    def __init__(self, bind='0.0.0.0', port=5050):
        threading.Thread.__init__(self, name='web_server')
        self.bind = str(bind)  # String to remove unicode warning from cherrypy startup
        self.port = port

    def start(self):
        # If we have already started and stopped a thread, we need to reinitialize it to create a new one
        if not self.is_alive():
            self.__init__(bind=self.bind, port=self.port)
        threading.Thread.start(self)

    def run(self):
        self._start_server()

    def stop(self):
        log.info('Shutting down web server')
        cherrypy.engine.exit()

    def _start_server(self):
        # Mount the WSGI callable object (app) on the root directory
        cherrypy.tree.graft(_default_app, '/')
        for path, registered_app in _app_register.items():
            cherrypy.tree.graft(registered_app, path)

        # Keep CherryPy's log records out of the root logger's handlers.
        cherrypy.log.error_log.propagate = False
        cherrypy.log.access_log.propagate = False

        # Set the configuration of the web server
        cherrypy.config.update({
            'engine.autoreload.on': False,
            'server.socket_port': self.port,
            'server.socket_host': self.bind,
            'log.screen': False,
        })

        try:
            # Resolve a printable host name when bound to all interfaces.
            host = self.bind if self.bind != "0.0.0.0" else socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            host = '127.0.0.1'
        log.info('Web interface available at http://%s:%s' % (host, self.port))

        # Start the CherryPy WSGI web server; block() keeps this thread
        # running until the engine exits.
        cherrypy.engine.start()
        cherrypy.engine.block()
@with_session
def get_user(username='flexget', session=None):
    """Return the user row for *username*, creating it if it does not exist."""
    user = session.query(User).filter(User.name == username).first()
    if not user:
        # Note: not committed here; callers (or with_session) handle that.
        user = User()
        user.name = username
        session.add(user)
    return user
@with_session
def change_password(username='flexget', password='', session=None):
    """Validate *password* strength and store its hash for *username*.

    :raises WeakPassword: if `safe` does not rate the password at least
        'medium'.
    """
    check = safe.check(password)
    if check.strength not in ['medium', 'strong']:
        # SECURITY FIX: do not echo the plaintext password back in the
        # exception message — it can end up in logs or terminal scrollback.
        raise WeakPassword('Password is not strong enough')
    user = get_user(username=username, session=session)
    user.password = str(generate_password_hash(password))
    session.commit()
@with_session
def generate_token(username='flexget', session=None):
    """Replace *username*'s API token with a fresh key and return it."""
    user = get_user(username=username, session=session)
    user.token = generate_key()
    session.commit()
    return user.token
| |
# pylint: disable=unused-argument,too-many-arguments,redefined-outer-name
"""
Tests for serializers for profiles REST APIS
"""
import pytest
import factory
from django.core.files.uploadedfile import SimpleUploadedFile
from rest_framework.exceptions import ValidationError
from profiles.factories import UserWebsiteFactory
from profiles.models import Profile, PERSONAL_SITE_TYPE, FACEBOOK_DOMAIN
from profiles.serializers import (
UserSerializer,
ProfileSerializer,
UserWebsiteSerializer,
)
# Smallest valid GIF payload (GIF89a header, 1x1 pixel) used as an uploaded
# image fixture in the profile tests below.
small_gif = (
    b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04"
    b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02"
    b"\x02\x4c\x01\x00\x3b"
)
def test_serialize_user(user):
    """
    Test serializing a user
    """
    profile = user.profile
    # The serialized payload should mirror the profile fields verbatim.
    assert UserSerializer(user).data == {
        "id": user.id,
        "username": user.username,
        "profile": {
            "name": profile.name,
            "image": profile.image,
            "image_small": profile.image_small,
            "image_medium": profile.image_medium,
            "image_file": profile.image_file.url,
            "image_small_file": profile.image_small_file.url,
            "image_medium_file": profile.image_medium_file.url,
            "profile_image_small": profile.image_small_file.url,
            "profile_image_medium": profile.image_medium_file.url,
            "bio": profile.bio,
            "headline": profile.headline,
            "username": profile.user.username,
            "placename": profile.location["value"],
        },
    }
def test_serialize_create_user(db, mocker):
    """
    Test creating a user
    """
    profile = {
        "name": "name",
        "image": "image",
        "image_small": "image_small",
        "image_medium": "image_medium",
        "email_optin": True,
        "toc_optin": True,
        "bio": "bio",
        "headline": "headline",
        "placename": "",
    }
    # Stub the side effects of user creation (auth tokens, enrollment sync).
    get_or_create_auth_tokens_stub = mocker.patch(
        "channels.api.get_or_create_auth_tokens"
    )
    enrollment_job_mock = mocker.patch(
        "authentication.api.update_enrollments_for_email.delay"
    )
    serializer = UserSerializer(data={"email": "test@localhost", "profile": profile})
    serializer.is_valid(raise_exception=True)
    user = serializer.save()
    get_or_create_auth_tokens_stub.assert_called_once_with(user)
    enrollment_job_mock.assert_called_once_with(user.email)
    del profile["email_optin"]  # is write-only
    del profile["toc_optin"]  # is write-only
    # Round-tripping through the serializer adds derived/defaulted fields.
    profile.update(
        {
            "image_file": None,
            "image_small_file": None,
            "image_medium_file": None,
            "profile_image_small": "image_small",
            "profile_image_medium": "image_medium",
            "username": user.username,
        }
    )
    assert UserSerializer(user).data == {
        "id": user.id,
        "username": user.username,
        "profile": profile,
    }
@pytest.mark.parametrize(
    "key,value",
    [
        ("name", "name_value"),
        ("image", "image_value"),
        ("image_small", "image_small_value"),
        ("image_medium", "image_medium_value"),
        ("email_optin", True),
        ("email_optin", False),
        ("bio", "bio_value"),
        ("headline", "headline_value"),
        ("toc_optin", True),
        ("toc_optin", False),
    ],
)
def test_update_user_profile(mocker, user, key, value):
    """
    Test updating a profile via the UserSerializer
    """
    mock_after_profile_created_or_updated = mocker.patch(
        "profiles.serializers.after_profile_created_or_updated"
    )
    profile = user.profile
    serializer = UserSerializer(
        instance=user, data={"profile": {key: value}}, partial=True
    )
    serializer.is_valid(raise_exception=True)
    serializer.save()
    profile2 = Profile.objects.get(user=user)
    # Only the updated field should change; all others keep their value.
    for prop in (
        "name",
        "image",
        "image_small",
        "image_medium",
        "email_optin",
        "toc_optin",
        "bio",
        "headline",
    ):
        if prop == key:
            if isinstance(value, bool):
                # Identity check guards against truthy non-bool leakage.
                assert getattr(profile2, prop) is value
            else:
                assert getattr(profile2, prop) == value
        else:
            assert getattr(profile2, prop) == getattr(profile, prop)
    mock_after_profile_created_or_updated.assert_called_once_with(profile)
@pytest.mark.parametrize(
    "data,is_valid",
    [
        ({}, True),
        ("notjson", False),
        ({"bad": "json"}, False),
        (None, True),
        ({"value": "city"}, True),
    ],
)
def test_location_validation(user, data, is_valid):
    """Test that location validation works correctly"""
    serializer = ProfileSerializer(
        instance=user.profile, data={"location": data}, partial=True
    )
    assert serializer.is_valid(raise_exception=False) is is_valid
@pytest.mark.parametrize(
    "key,value",
    [
        ("name", "name_value"),
        ("bio", "bio_value"),
        ("headline", "headline_value"),
        ("location", {"value": "Hobbiton, The Shire, Middle-Earth"}),
        (
            "image_file",
            SimpleUploadedFile("small.gif", small_gif, content_type="image/gif"),
        ),
    ],
)
def test_update_profile(mocker, user, key, value):
    """
    Test updating a profile via the ProfileSerializer
    """
    mock_after_profile_created_or_updated = mocker.patch(
        "profiles.serializers.after_profile_created_or_updated"
    )
    profile = user.profile
    serializer = ProfileSerializer(
        instance=user.profile, data={key: value}, partial=True
    )
    serializer.is_valid(raise_exception=True)
    serializer.save()
    profile2 = Profile.objects.first()
    # Only the updated field should change; all others keep their value.
    for prop in (
        "name",
        "image_file",
        "email_optin",
        "toc_optin",
        "bio",
        "headline",
        "location",
    ):
        if prop == key:
            if isinstance(value, bool):
                assert getattr(profile2, prop) is value
            elif key == "image_file":
                # Compare uploaded file contents rather than file objects.
                assert getattr(profile2, prop).read() == small_gif
            else:
                assert getattr(profile2, prop) == value
        else:
            assert getattr(profile2, prop) == getattr(profile, prop)
    mock_after_profile_created_or_updated.assert_called_once_with(profile)
def test_serialize_profile_websites(user):
    """Tests that the ProfileSerializer includes UserWebsite information when an option is set via the context"""
    profile = user.profile
    # One personal site and one facebook site for the same profile.
    user_websites = UserWebsiteFactory.create_batch(
        2,
        profile=profile,
        site_type=factory.Iterator([PERSONAL_SITE_TYPE, FACEBOOK_DOMAIN]),
    )
    serialized_profile = ProfileSerializer(
        profile, context={"include_user_websites": True}
    ).data
    serialized_sites = UserWebsiteSerializer(user_websites, many=True).data
    assert len(serialized_profile["user_websites"]) == 2
    # Check that the two lists of OrderedDicts are equivalent
    assert sorted(
        [list(data.items()) for data in serialized_profile["user_websites"]]
    ) == sorted([list(data.items()) for data in serialized_sites])
class TestUserWebsiteSerializer:
    """UserWebsiteSerializer tests"""

    def test_serialize(self):
        """
        Test serializing a user website
        """
        user_website = UserWebsiteFactory.build()
        assert UserWebsiteSerializer(user_website).data == {
            "id": user_website.id,
            "url": user_website.url,
            "site_type": user_website.site_type,
        }

    def test_deserialize(self, mocker, user):
        """
        Test deserializing a user website
        """
        url = "https://example.com"
        site_type = "dummy"
        # Site-type detection is stubbed; we only check that it is applied.
        patched_get_site_type = mocker.patch(
            "profiles.serializers.get_site_type_from_url", return_value=site_type
        )
        user_website_data = {"username": user.username, "url": url}
        serializer = UserWebsiteSerializer(data=user_website_data)
        is_valid = serializer.is_valid(raise_exception=True)
        assert is_valid is True
        assert serializer.validated_data["url"] == url
        assert serializer.validated_data["site_type"] == site_type
        assert serializer.validated_data["profile"] == user.profile
        patched_get_site_type.assert_called_once_with(url)

    @pytest.mark.parametrize(
        "input_url,exp_result_url",
        [("HTtPS://AbC.COM", "https://abc.com"), ("AbC.cOM", "http://abc.com")],
    )
    def test_user_website_url(self, mocker, user, input_url, exp_result_url):
        """
        Test that deserializing a user website url adds a protocol if necessary and forces lowercase.
        """
        site_type = "dummy"
        mocker.patch(
            "profiles.serializers.get_site_type_from_url", return_value=site_type
        )
        user_website_data = {"username": user.username, "url": input_url}
        serializer = UserWebsiteSerializer(data=user_website_data)
        is_valid = serializer.is_valid(raise_exception=True)
        assert is_valid is True
        assert serializer.validated_data["url"] == exp_result_url

    def test_site_uniqueness(self, user):
        """
        Test that a user can only save one of a specific type of site
        """
        UserWebsiteFactory.create(
            profile=user.profile, url="facebook.com/1", site_type=FACEBOOK_DOMAIN
        )
        user_website_data = {"username": user.username, "url": "facebook.com/2"}
        serializer = UserWebsiteSerializer(data=user_website_data)
        with pytest.raises(
            ValidationError, match="A website of this type has already been saved."
        ):
            serializer.is_valid(raise_exception=True)
            serializer.save()
| |
"""
Module providing easy API for working with remote files and folders.
"""
from __future__ import with_statement
import hashlib
import tempfile
import re
import os
from StringIO import StringIO
from functools import partial
from fabric.api import *
from fabric.utils import apply_lcwd
def exists(path, use_sudo=False, verbose=False):
    """
    Return True if given path exists on the current remote host.

    If ``use_sudo`` is True, will use `sudo` instead of `run`.

    `exists` will, by default, hide all output (including the run line,
    stdout, stderr and any warning resulting from the file not existing) in
    order to avoid cluttering output. You may specify ``verbose=True`` to
    change this behavior.
    """
    runner = sudo if use_sudo else run
    cmd = 'test -e %s' % _expand_path(path)
    # Suppress all output unless the caller asked for verbosity.
    output_groups = [] if verbose else [hide('everything')]
    with settings(*output_groups, warn_only=True):
        return not runner(cmd).failed
def is_link(path, use_sudo=False, verbose=False):
    """
    Return True if the given path is a symlink on the current remote host.

    If ``use_sudo`` is True, will use `.sudo` instead of `.run`.

    `.is_link` will, by default, hide all output. Give ``verbose=True`` to change this.
    """
    func = sudo if use_sudo else run
    cmd = 'test -L "$(echo %s)"' % path
    args, kwargs = [], {'warn_only': True}
    if not verbose:
        # BUG FIX: the hide() group was previously bound to an unused local
        # ('opts'), so output was shown even when verbose=False.
        args.append(hide('everything'))
    with settings(*args, **kwargs):
        return func(cmd).succeeded
def first(*args, **kwargs):
    """
    Given one or more file paths, returns first one found, or None if none
    exist. May specify ``use_sudo`` and ``verbose`` which are passed to
    `exists`.
    """
    found = (candidate for candidate in args if exists(candidate, **kwargs))
    return next(found, None)
def upload_template(filename, destination, context=None, use_jinja=False,
        template_dir=None, use_sudo=False, backup=True, mirror_local_mode=False,
        mode=None, pty=None):
    """
    Render and upload a template text file to a remote host.

    Returns the result of the inner call to `~fabric.operations.put` -- see its
    documentation for details.

    ``filename`` should be the path to a text file, which may contain `Python
    string interpolation formatting
    <http://docs.python.org/library/stdtypes.html#string-formatting>`_ and will
    be rendered with the given context dictionary ``context`` (if given.)

    Alternately, if ``use_jinja`` is set to True and you have the Jinja2
    templating library available, Jinja will be used to render the template
    instead. Templates will be loaded from the invoking user's current working
    directory by default, or from ``template_dir`` if given.

    The resulting rendered file will be uploaded to the remote file path
    ``destination``. If the destination file already exists, it will be
    renamed with a ``.bak`` extension unless ``backup=False`` is specified.

    By default, the file will be copied to ``destination`` as the logged-in
    user; specify ``use_sudo=True`` to use `sudo` instead.

    The ``mirror_local_mode`` and ``mode`` kwargs are passed directly to an
    internal `~fabric.operations.put` call; please see its documentation for
    details on these two options.

    The ``pty`` kwarg will be passed verbatim to any internal
    `~fabric.operations.run`/`~fabric.operations.sudo` calls, such as those
    used for testing directory-ness, making backups, etc.

    .. versionchanged:: 1.1
        Added the ``backup``, ``mirror_local_mode`` and ``mode`` kwargs.
    .. versionchanged:: 1.9
        Added the ``pty`` kwarg.
    """
    func = use_sudo and sudo or run
    if pty is not None:
        func = partial(func, pty=pty)
    # Normalize destination to be an actual filename, due to using StringIO
    with settings(hide('everything'), warn_only=True):
        if func('test -d %s' % _expand_path(destination)).succeeded:
            sep = "" if destination.endswith('/') else "/"
            destination += sep + os.path.basename(filename)

    # Use mode kwarg to implement mirror_local_mode, again due to using
    # StringIO
    if mirror_local_mode and mode is None:
        mode = os.stat(filename).st_mode
        # To prevent put() from trying to do this
        # logic itself
        mirror_local_mode = False

    # Process template
    text = None
    if use_jinja:
        try:
            # Resolve the template directory relative to any local cwd state.
            template_dir = template_dir or os.getcwd()
            template_dir = apply_lcwd(template_dir, env)
            from jinja2 import Environment, FileSystemLoader
            jenv = Environment(loader=FileSystemLoader(template_dir))
            text = jenv.get_template(filename).render(**context or {})
            # Force to a byte representation of Unicode, or str()ification
            # within Paramiko's SFTP machinery may cause decode issues for
            # truly non-ASCII characters.
            text = text.encode('utf-8')
        except ImportError:
            import traceback
            tb = traceback.format_exc()
            abort(tb + "\nUnable to import Jinja2 -- see above.")
    else:
        # Plain %-interpolation path (no Jinja).
        filename = apply_lcwd(filename, env)
        with open(os.path.expanduser(filename)) as inputfile:
            text = inputfile.read()
        if context:
            text = text % context

    # Back up original file
    if backup and exists(destination):
        func("cp %s{,.bak}" % _expand_path(destination))

    # Upload the file.
    return put(
        local_path=StringIO(text),
        remote_path=destination,
        use_sudo=use_sudo,
        mirror_local_mode=mirror_local_mode,
        mode=mode
    )
def sed(filename, before, after, limit='', use_sudo=False, backup='.bak',
        flags='', shell=False):
    """
    Run a search-and-replace on ``filename`` with given regex patterns.

    Equivalent to ``sed -i<backup> -r -e "/<limit>/ s/<before>/<after>/<flags>g"
    <filename>``. Setting ``backup`` to an empty string will, disable backup
    file creation.

    For convenience, ``before`` and ``after`` will automatically escape forward
    slashes, single quotes and parentheses for you, so you don't need to
    specify e.g. ``http:\/\/foo\.com``, instead just using ``http://foo\.com``
    is fine.

    If ``use_sudo`` is True, will use `sudo` instead of `run`.

    The ``shell`` argument will be eventually passed to `run`/`sudo`. It
    defaults to False in order to avoid problems with many nested levels of
    quotes and backslashes. However, setting it to True may help when using
    ``~fabric.operations.cd`` to wrap explicit or implicit ``sudo`` calls.
    (``cd`` by it's nature is a shell built-in, not a standalone command, so it
    should be called within a shell.)

    Other options may be specified with sed-compatible regex flags -- for
    example, to make the search and replace case insensitive, specify
    ``flags="i"``. The ``g`` flag is always specified regardless, so you do not
    need to remember to include it when overriding this parameter.

    .. versionadded:: 1.1
        The ``flags`` parameter.
    .. versionadded:: 1.6
        Added the ``shell`` keyword argument.
    """
    func = use_sudo and sudo or run
    # Characters to be escaped in both
    for char in "/'":
        before = before.replace(char, r'\%s' % char)
        after = after.replace(char, r'\%s' % char)
    # Characters to be escaped in replacement only (they're useful in regexen
    # in the 'before' part)
    for char in "()":
        after = after.replace(char, r'\%s' % char)
    if limit:
        limit = r'/%s/ ' % limit
    context = {
        'script': r"'%ss/%s/%s/%sg'" % (limit, before, after, flags),
        'filename': _expand_path(filename),
        'backup': backup
    }
    # Test the OS because of differences between sed versions
    with hide('running', 'stdout'):
        platform = run("uname")
    if platform in ('NetBSD', 'OpenBSD', 'QNX'):
        # These seds lack -i; use a host+file-derived temp file instead.
        # Attempt to protect against failures/collisions
        hasher = hashlib.sha1()
        # NOTE(review): hasher.update() is fed str here; on Python 3 this
        # would need bytes — this module targets Python 2 (StringIO import).
        hasher.update(env.host_string)
        hasher.update(filename)
        context['tmp'] = "/tmp/%s" % hasher.hexdigest()
        # Use temp file to work around lack of -i
        expr = r"""cp -p %(filename)s %(tmp)s \
&& sed -r -e %(script)s %(filename)s > %(tmp)s \
&& cp -p %(filename)s %(filename)s%(backup)s \
&& mv %(tmp)s %(filename)s"""
    else:
        # BSD/macOS sed spells extended-regex mode -E; GNU sed uses -r.
        context['extended_regex'] = '-E' if platform == 'Darwin' else '-r'
        expr = r"sed -i%(backup)s %(extended_regex)s -e %(script)s %(filename)s"
    command = expr % context
    return func(command, shell=shell)
def uncomment(filename, regex, use_sudo=False, char='#', backup='.bak',
              shell=False):
    """
    Uncomment all lines in ``filename`` that match ``regex``.

    ``char`` is the comment delimiter to strip (default ``#``). The
    ``use_sudo``, ``shell`` and ``backup`` keyword arguments are passed
    straight through to `sed`.

    A single whitespace character immediately following the comment
    character is removed as well, while any whitespace *before* the comment
    character is preserved: ``# foo`` becomes ``foo``, and ``    # foo``
    becomes ``    foo``.

    .. versionchanged:: 1.6
        Added the ``shell`` keyword argument.
    """
    # Capture leading indentation, then drop the delimiter plus at most one
    # trailing whitespace character; re-emit only the captured indentation.
    pattern = r'^([[:space:]]*)%s[[:space:]]?' % char
    return sed(filename, before=pattern, after=r'\1', limit=regex,
               use_sudo=use_sudo, backup=backup, shell=shell)
def comment(filename, regex, use_sudo=False, char='#', backup='.bak',
            shell=False):
    """
    Comment out all lines in ``filename`` matching ``regex``.

    ``char`` is the commenting character to prepend (default ``#``). The
    ``use_sudo``, ``shell`` and ``backup`` keyword arguments are passed
    straight through to `sed`.

    The comment character is inserted at the very beginning of the line, so
    it does not "follow" existing indentation; nor is a trailing space added
    unless you pass e.g. ``char='# '``.

    .. note::
        The ``regex`` argument is wrapped in parentheses for you so the
        matched line can be preserved, with any leading ``^`` or trailing
        ``$`` moved outside the group. ``comment(filename, r'^foo$')``
        therefore runs `sed` with a "before" regex of ``r'^(foo)$'`` and an
        "after" regex of ``r'#\\1'``.

    .. versionadded:: 1.5
        Added the ``shell`` keyword argument.
    """
    # Peel off anchors so they can sit outside the capturing group.
    caret = '^' if regex.startswith('^') else ''
    body = regex[len(caret):]
    dollar = '$' if body.endswith('$') else ''
    if dollar:
        body = body[:-1]
    return sed(
        filename,
        before="%s(%s)%s" % (caret, body, dollar),
        after=r'%s\1' % char,
        use_sudo=use_sudo,
        backup=backup,
        shell=shell
    )
def contains(filename, text, exact=False, use_sudo=False, escape=True,
             shell=False):
    """
    Return True if ``filename`` contains ``text`` (which may be a regex).

    By default a partial line match counts; pass ``exact=True`` to require
    that an entire line equal ``text``.

    The check is performed with ``egrep`` on the remote end (so it may not
    follow Python regular expression syntax perfectly) and skips the
    ``env.shell`` wrapper by default.

    If ``use_sudo`` is True, `sudo` is used instead of `run`.

    If ``escape`` is False, no extra regex-related escaping is performed
    (this also disables ``exact``, since no ``^``/``$`` anchors are added).

    The ``shell`` argument is eventually passed to ``run``/``sudo``; see the
    description of the same argument in ``~fabric.contrib.sed`` for details.

    .. versionchanged:: 1.0
        Swapped the order of the ``filename`` and ``text`` arguments to be
        consistent with other functions in this module.
    .. versionchanged:: 1.4
        Updated the regular expression related escaping to try and solve
        various corner cases.
    .. versionchanged:: 1.4
        Added ``escape`` keyword argument.
    .. versionadded:: 1.6
        Added the ``shell`` keyword argument.
    """
    runner = sudo if use_sudo else run
    if escape:
        text = _escape_for_regex(text)
        if exact:
            text = "^%s$" % text
    command = 'egrep "%s" %s' % (text, _expand_path(filename))
    with settings(hide('everything'), warn_only=True):
        return runner(command, shell=shell).succeeded
def append(filename, text, use_sudo=False, partial=False, escape=True,
           shell=False):
    """
    Append string (or list of strings) ``text`` to ``filename``.

    A list is processed one string at a time, in order. Each string that is
    already present in ``filename`` is skipped; otherwise it is appended via
    ``echo '$text' >> $filename``.

    The presence test defaults to a full-line match (``^<text>$``); pass
    ``partial=True`` to match on ``^<text>`` only.

    Because the echoed text is single-quoted, single quotes are
    backslash-escaped transparently; disable this with ``escape=False``.

    If ``use_sudo`` is True, `sudo` is used instead of `run`.

    The ``shell`` argument is eventually passed to ``run``/``sudo``; see the
    description of the same argument in ``~fabric.contrib.sed`` for details.

    .. versionchanged:: 0.9.1
        Added the ``partial`` keyword argument.
    .. versionchanged:: 1.0
        Swapped the order of the ``filename`` and ``text`` arguments to be
        consistent with other functions in this module.
    .. versionchanged:: 1.0
        Changed default value of ``partial`` kwarg to be ``False``.
    .. versionchanged:: 1.4
        Updated the regular expression related escaping to try and solve
        various corner cases.
    .. versionadded:: 1.6
        Added the ``shell`` keyword argument.
    """
    runner = sudo if use_sudo else run
    # Normalize a bare string into a one-element list.
    lines = [text] if isinstance(text, basestring) else text
    for line in lines:
        suffix = '' if partial else '$'
        regex = '^' + _escape_for_regex(line) + suffix
        # Skip lines already present (empty lines are always appended).
        if (exists(filename, use_sudo=use_sudo) and line
            and contains(filename, regex, use_sudo=use_sudo, escape=False,
                         shell=shell)):
            continue
        if escape:
            line = line.replace("'", r"'\\''")
        runner("echo '%s' >> %s" % (line, _expand_path(filename)))
def _escape_for_regex(text):
"""Escape ``text`` to allow literal matching using egrep"""
regex = re.escape(text)
# Seems like double escaping is needed for \
regex = regex.replace('\\\\', '\\\\\\')
# Triple-escaping seems to be required for $ signs
regex = regex.replace(r'\$', r'\\\$')
# Whereas single quotes should not be escaped
regex = regex.replace(r"\'", "'")
return regex
def _expand_path(path):
return '"$(echo %s)"' % path
| |
#!/usr/bin/python
# Title make-network-acls.py
# Description Create network ACLs on AWS
# Author I-Ming Chen <imchen@red5studios.com>
# Date 2014-09-22
# Version 0.0.1
# Usage ./make-network-acls.py
# ./make-network-acls.py --vpcid vpc-123456 --config ../path/to/file.yml --cidrlist ../path/to/json --profile prod-sa-east-1 [--dryrun] [--verbose]
# Notes Requires AWS CLI installed
#============================================================================
import re
import sys
import subprocess
import shlex
import argparse
import readline
import yaml
import json
# Module-level state shared with interactive()/main() via ``global``.
DRYRUN="0" #NYI: the --dryrun flag is parsed but not yet honored
CONFIRMATION="no"  # must become "YES"/"yes"/"Y"/"y" before main() creates ACLs
def parse_options():
    """Parse command-line options.

    Returns the argparse Namespace; unset value options are None and unset
    boolean flags are False.
    """
    # (flag, help) specs for options that take a value ...
    value_specs = [
        ("--vpcid", "VPC ID"),
        ("--config", "Yaml config of security group rules"),
        ("--cidrlist", "CIDR/Name list (JSON)"),
        ("--profile", "AWS CLI Profile"),
    ]
    # ... and for boolean store_true flags.
    flag_specs = [
        ("--disable-acl", "Disable acl output"),
        ("--dryrun", "Dry run"),
        ("--verbose", "Verbose mode"),
    ]
    parser = argparse.ArgumentParser()
    for flag, description in value_specs:
        parser.add_argument(flag, help=description)
    for flag, description in flag_specs:
        parser.add_argument(flag, action="store_true", help=description)
    return parser.parse_args()
def parse_config(config):
    """Parse the YAML rule file and return its deserialized contents.

    :param config: path to the YAML configuration file.
    :returns: the parsed document (typically a dict keyed by section name).
    """
    # ``open`` instead of the Python 2-only ``file`` builtin, which was
    # removed in Python 3.
    with open(config, 'r') as fp:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects from tagged input; switch to
        # yaml.safe_load if the config is plain data.
        content = yaml.load(fp)
    return content
def parse_cidr_list(json_file):
    """Load the subnet CIDR -> name mapping from ``json_file``.

    :param json_file: path to a JSON file.
    :returns: the deserialized JSON data.
    """
    # ``with`` guarantees the handle is closed even if json.load raises;
    # the previous open()/close() pair leaked the handle on error.
    with open(json_file) as json_data:
        return json.load(json_data)
def preview_acl(acl_name, rules):
    """Print a summary of the ACLs/rules that would be created.

    NOTE(review): still a stub -- ``acl_name`` and ``rules`` are ignored and
    no real counts are computed yet.
    """
    # Parenthesized print is valid on both Python 2 and Python 3, unlike the
    # previous print statements.
    print("We'll make X ACLs")
    print("We'll make Y Rules")
def create_acl(cidr_list, acl_name, vpcid, profile, joined_dict):
    """Create a network ACL in ``vpcid`` and re-associate matching subnets.

    :param cidr_list: dict mapping subnet CIDR -> dashed name string.
    :param acl_name: dashed ACL name; its second dash-separated element is
        dropped before comparing against the ``cidr_list`` names.
    :param vpcid: VPC in which to create the ACL.
    :param profile: AWS CLI profile used for every aws command.
    :param joined_dict: dict mapping subnet CIDR -> NetworkAclAssociationId
        (built in main()).
    :returns: the NetworkAclId of the newly created ACL.
    """
    # Create the ACL and capture its ID from the aws CLI JSON output.
    print "aws ec2 create-network-acl --vpc-id %s --profile %s" % (vpcid, profile)
    command = "aws ec2 create-network-acl --vpc-id %s --profile %s" % (vpcid, profile)
    process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    output = process.communicate()[0]
    network_acl = json.loads(output)
    network_acl_id = network_acl["NetworkAcl"]["NetworkAclId"]
    # NOTE(review): assumes acl_name has at least two dash-separated parts;
    # a shorter name raises IndexError here.
    acl_name_check = acl_name.split("-")
    del acl_name_check[1]
    for subnets, cidr_names in cidr_list.items():
        if acl_name_check == cidr_names.split("-"):
            # Subnet now needs to match the joined dictionary
            # Chop off the first 2 parts of the string which is "10.x" or where x is the regional subnet
            # Basically comparing the tail end of the CIDR block: "1.0/24" vs "1.0/24"
            for key, value in joined_dict.items():
                candidate = key.split(".")
                del candidate[0:2]
                subnet_master = subnets.split(".")
                del subnet_master[0:2]
                if subnet_master == candidate:
                    network_acl_association_id = value
                    # Reassociate Network ACLs with correct subnets since we just created the ACLs
                    # aws ec2 replace-network-acl-association --association-id aclassoc-e5b95c8c --network-acl-id acl-5fb85d36
                    print "aws ec2 replace-network-acl-association --association-id %s --network-acl-id %s --profile %s" % (network_acl_association_id, network_acl_id, profile)
                    command = "aws ec2 replace-network-acl-association --association-id %s --network-acl-id %s --profile %s" % (network_acl_association_id, network_acl_id, profile)
                    process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
                    output = process.communicate()[0]
                    # We put the re-association deep inside the loop in the event that multiple subnets need to be reassigned to an ACL
    return network_acl_id
def tag_acl(acl_name, network_acl_id, profile):
    """Apply a Name=acl_name tag to the given network ACL via the aws CLI."""
    command = ("aws ec2 create-tags --resources %s "
               "--tags Key=Name,Value=%s --profile %s"
               % (network_acl_id, acl_name, profile))
    print(command)
    process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    # stdout could be inspected to verify the tagging succeeded.
    process.communicate()[0]
def _port_range_args(protocol, rule):
    """Return the port/ICMP CLI fragment for one rule.

    ``rule`` is either "all", a single port, or a "start-end" range.
    """
    if protocol == "icmp" and rule == "all":
        # ICMP "all" uses type/code wildcards, never --port-range.
        return "--icmp-type-code Type=-1,Code=-1"
    if re.search("-", str(rule)):
        ports = str(rule).split("-")
        port_start, port_end = ports[0], ports[1]
    else:
        port_start = port_end = rule
    return "--port-range From=%s,To=%s" % (port_start, port_end)


def _create_entries(network_acl_id, protocol, entries, traffic_flow,
                    rule_num, profile):
    """Create one ACL entry per (rule, cidr) pair, numbering from rule_num."""
    for rule_hash in entries:
        for rule, cidr in sorted(rule_hash.iteritems()):
            port_range = _port_range_args(protocol, rule)
            # e.g. aws ec2 create-network-acl-entry --network-acl-id acl-5fb85d36
            #      --rule-number 100 --protocol udp --rule-action allow
            #      --ingress --cidr-block 0.0.0.0/0 --port-range From=53,To=53
            command = ("aws ec2 create-network-acl-entry "
                       "--network-acl-id %s --rule-number %s --protocol %s "
                       "--rule-action allow --%s --cidr-block %s %s "
                       "--profile %s"
                       % (network_acl_id, rule_num, protocol, traffic_flow,
                          cidr, port_range, profile))
            print(command)
            process = subprocess.Popen(shlex.split(command),
                                       stdout=subprocess.PIPE)
            process.communicate()
            rule_num += 1


# Originally meant to be atomic (one rule creation per call), but the rule
# set is iterated as a whole, so this creates every entry for one ACL.
def create_acl_entry_set(acl_name, network_acl_id, rules, profile):
    """Create all inbound (ingress) and outbound (egress) entries for an ACL.

    :param acl_name: ACL name, used only for progress output.
    :param network_acl_id: the NetworkAclId to attach entries to.
    :param rules: dict of protocol -> {"inbound": [...], "outbound": [...]}.
    :param profile: AWS CLI profile.

    Fixes over the previous version: the outbound branch used a stale (or
    undefined) ``port_range`` in its progress output, and outbound ICMP
    "all" rules were executed with ``--port-range From=all,To=all`` instead
    of ``--icmp-type-code Type=-1,Code=-1``. Both directions now share one
    code path.
    """
    # Rule numbering starts at a per-protocol offset in each direction.
    start_rule_numbers = {
        "icmp": 200,
        "tcp": 500,
        "udp": 600,
        "all": 100
    }
    print("Creating set for " + str(acl_name) + ":")
    for protocol, acl in rules.iteritems():
        if "inbound" in acl:
            _create_entries(network_acl_id, protocol, acl["inbound"],
                            "ingress", start_rule_numbers[protocol], profile)
        if "outbound" in acl:
            _create_entries(network_acl_id, protocol, acl["outbound"],
                            "egress", start_rule_numbers[protocol], profile)
def interactive(args):
    """Prompt the operator for options missing from the command line.

    Mutates ``args`` in place and sets the module-level CONFIRMATION flag
    from the operator's answer.

    NOTE(review): args.cidrlist is never prompted for here, so an
    interactive run passes None to parse_cidr_list() in main() -- confirm
    whether that path is expected to work.
    """
    global CONFIRMATION
    args.vpcid = raw_input("Enter VPC-ID: ")
    args.config = raw_input("Enter config YML file (include path): ")
    args.profile = raw_input("Enter AWS CLI profile name: ")
    #for acl_name, rules in config.get("network-acls").iteritems():
    print "I plan to preview something here with preview_acl()"
    CONFIRMATION = raw_input("Is this what you want to build? [YES/NO] ")
def main(args):
    """Drive ACL creation: map subnets to ACL associations, then create,
    tag and populate one ACL per entry in the config's "network-acls".

    Exits with status 1 if the operator did not confirm.
    """
    # Detector not debugged yet
    # Determine if we have aws cli installed
    print "====================="
    print "AWS Network ACL Creator!"
    global CONFIRMATION
    # No --profile on the CLI means interactive mode; otherwise the presence
    # of a profile implies confirmation.
    if not args.profile:
        interactive(args)
    else:
        CONFIRMATION="YES"
    config = parse_config(args.config)
    cidr_list = parse_cidr_list(args.cidrlist)
    # Subnets default to the primary ACL when created. We will need to reassociate them.
    # In order to do that we must map subnets (CIDR) to the network association ID so we can bind them to the correct network ACL
    # Let's map out subnet CIDR and IDs first
    command = "aws ec2 describe-subnets --profile %s" % (args.profile)
    process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    output = process.communicate()[0]
    subnets_struct = json.loads(output)
    subnets_dict = {}
    # SubnetId -> CidrBlock
    for subnets_entry in subnets_struct["Subnets"]:
        subnets_dict.update({subnets_entry["SubnetId"]:subnets_entry["CidrBlock"]})
    # Let's map out the network ACLs and how they are associated with subnets next
    command = "aws ec2 describe-network-acls --profile %s" % (args.profile)
    process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    output = process.communicate()[0]
    network_acl_struct = json.loads(output)
    network_acl_dict = {}
    # SubnetId -> NetworkAclAssociationId
    for associations in network_acl_struct["NetworkAcls"]:
        for subnets_assocs in associations["Associations"]:
            network_acl_dict.update({subnets_assocs["SubnetId"]:subnets_assocs["NetworkAclAssociationId"]})
    # Join the two maps on SubnetId: CidrBlock -> NetworkAclAssociationId
    joined_dict = {}
    for subnets_key, subnets_value in subnets_dict.items():
        for network_acl_key, network_acl_value in network_acl_dict.items():
            if subnets_key == network_acl_key:
                joined_dict.update({subnets_value:network_acl_value})
    if (CONFIRMATION == "YES" or CONFIRMATION == "yes" or CONFIRMATION == "Y" or CONFIRMATION == "y"):
        print "Creating network ACLs!"
        for acl_name, rules in config.get("network-acls").iteritems():
            network_acl_id = create_acl(cidr_list, acl_name, args.vpcid, args.profile, joined_dict)
            tag_acl(acl_name, network_acl_id, args.profile)
            create_acl_entry_set(acl_name, network_acl_id, rules, args.profile)
        print "### Done creating network ACLs! ###"
        return
    else:
        print "Okay, canceling."
        sys.exit(1) #Failure
if __name__ == '__main__':
    # Get settings
    args = parse_options()
    # Verify correct number of arguments before running
    # NOTE(review): this count check is brittle -- argparse has already
    # validated the options, and any optional flag (e.g. --dryrun) shifts
    # len(sys.argv) away from exactly 1 or 9, aborting otherwise-valid runs.
    if len(sys.argv) == 1:
        main(args)
    elif len(sys.argv) == 9:
        main(args)
    else:
        print "ERROR: Wrong number of arguments."
        sys.exit(1)
    sys.exit()
| |
# Copyright 2012 VMware, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0 import router
from neutronclient.tests.unit import test_cli20
class CLITestV20RouterJSON(test_cli20.CLITestV20Base):
    """CLI tests for the router create/list/update/delete/show commands and
    the router interface/gateway sub-commands (JSON request format)."""

    def test_create_router(self):
        # Create router: router1.
        resource = 'router'
        cmd = router.CreateRouter(test_cli20.MyApp(sys.stdout), None)
        name = 'router1'
        myid = 'myid'
        args = [name, '--description', 'rooter']
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   description='rooter')

    def test_create_router_flavor(self):
        # Create router: --flavor router-flavor router1.
        resource = 'router'
        cmd = router.CreateRouter(test_cli20.MyApp(sys.stdout), None)
        name = 'router1'
        myid = 'myid'
        flavor = 'router-flavor'
        args = [name, '--flavor', flavor]
        # The flavor is passed as the flavor_id keyword below, not as a
        # positional body field (position_values previously carried a stray
        # extra value).
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   flavor_id=flavor)

    def test_create_router_tenant(self):
        # Create router: --tenant_id tenantid myname.
        resource = 'router'
        cmd = router.CreateRouter(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--tenant_id', 'tenantid', name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tenant_id='tenantid')

    def test_create_router_admin_state(self):
        # Create router: --admin_state_down myname.
        resource = 'router'
        cmd = router.CreateRouter(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--admin_state_down', name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   admin_state_up=False)

    def _create_router_distributed_or_ha(self, distributed=None, ha=None):
        # Create router: --distributed distributed --ha ha myname.
        resource = 'router'
        cmd = router.CreateRouter(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = []
        if distributed is not None:
            args += ['--distributed', str(distributed)]
        if ha is not None:
            args += ['--ha', str(ha)]
        args.append(name)
        position_names = ['name', ]
        position_values = [name, ]
        expected = {}
        if distributed is not None:
            expected['distributed'] = str(distributed)
        if ha is not None:
            expected['ha'] = str(ha)
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   **expected)

    def test_create_router_distributed_True(self):
        # Create router: --distributed=True.
        self._create_router_distributed_or_ha(distributed='True')

    def test_create_router_ha_with_True(self):
        self._create_router_distributed_or_ha(ha='True')

    def test_create_router_ha_with_true(self):
        self._create_router_distributed_or_ha(ha='true')

    def test_create_router_ha_with_False(self):
        self._create_router_distributed_or_ha(ha='False')

    def test_create_router_ha_with_false(self):
        self._create_router_distributed_or_ha(ha='false')

    def test_create_router_distributed_False(self):
        # Create router: --distributed=False.
        self._create_router_distributed_or_ha(distributed='False')

    def test_create_router_distributed_true(self):
        # Create router: --distributed=true.
        self._create_router_distributed_or_ha(distributed='true')

    def test_create_router_distributed_false(self):
        # Create router: --distributed=false.
        self._create_router_distributed_or_ha(distributed='false')

    def test_create_router_with_az_hint(self):
        # Create router: --availability-zone-hint zone1
        # --availability-zone-hint zone2.
        resource = 'router'
        cmd = router.CreateRouter(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--availability-zone-hint', 'zone1',
                '--availability-zone-hint', 'zone2', name]
        position_names = ['availability_zone_hints', 'name']
        position_values = [['zone1', 'zone2'], name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_list_routers_detail(self):
        # list routers: -D.
        resources = "routers"
        cmd = router.ListRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, True)

    def test_list_routers_pagination(self):
        resources = "routers"
        cmd = router.ListRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources_with_pagination(resources, cmd)

    def test_list_routers_sort(self):
        # list routers:
        # --sort-key name --sort-key id --sort-key asc --sort-key desc
        resources = "routers"
        cmd = router.ListRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd,
                                  sort_key=["name", "id"],
                                  sort_dir=["asc", "desc"])

    def test_list_routers_limit(self):
        # list routers: -P.
        resources = "routers"
        cmd = router.ListRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_list_resources(resources, cmd, page_size=1000)

    def test_update_router_exception(self):
        # Update router: myid. (No fields to update is an error.)
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        self.assertRaises(exceptions.CommandError, self._test_update_resource,
                          resource, cmd, 'myid', ['myid'], {})

    def test_update_router(self):
        # Update router: myid --name myname --description :D.
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--description', ':D'],
                                   {'name': 'myname', 'description': ':D'})

    def test_update_router_admin_state(self):
        # Update router: myid --admin-state-up <True|False>.
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--admin-state-up', 'True'],
                                   {'admin_state_up': 'True'}
                                   )
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--admin-state-up', 'true'],
                                   {'admin_state_up': 'true'}
                                   )
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--admin-state-up', 'False'],
                                   {'admin_state_up': 'False'}
                                   )
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--admin-state-up', 'false'],
                                   {'admin_state_up': 'false'}
                                   )

    def test_update_router_distributed(self):
        # Update router: myid --distributed <True|False>.
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--distributed', 'True'],
                                   {'distributed': 'True'}
                                   )
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--distributed', 'true'],
                                   {'distributed': 'true'}
                                   )
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--distributed', 'False'],
                                   {'distributed': 'False'}
                                   )
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--distributed', 'false'],
                                   {'distributed': 'false'}
                                   )

    def test_update_router_no_routes(self):
        # Update router: myid --no-routes
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--no-routes'],
                                   {'routes': None})

    def test_update_router_add_route(self):
        # Update router: myid --route destination=10.0.3.0/24,nexthop=10.0.0.10
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid,
                '--route',
                'destination=10.0.3.0/24,nexthop=10.0.0.10']
        routes = [{'destination': '10.0.3.0/24',
                   'nexthop': '10.0.0.10'}]
        updatefields = {'routes': routes}
        self._test_update_resource(resource, cmd, myid, args, updatefields)

    def test_update_router_add_routes(self):
        # Update router: myid --route destination=10.0.3.0/24,nexthop=10.0.0.10
        #                     --route destination=fd7a:1d63:2063::/64,
        #                             nexthop=fd7a:1d63:2063:0:f816:3eff:fe0e:a697
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid,
                '--route',
                'destination=10.0.3.0/24,nexthop=10.0.0.10',
                '--route',
                'destination=fd7a:1d63:2063::/64,'
                'nexthop=fd7a:1d63:2063:0:f816:3eff:fe0e:a697']
        routes = [{'destination': '10.0.3.0/24',
                   'nexthop': '10.0.0.10'},
                  {'destination': 'fd7a:1d63:2063::/64',
                   'nexthop': 'fd7a:1d63:2063:0:f816:3eff:fe0e:a697'}]
        updatefields = {'routes': routes}
        self._test_update_resource(resource, cmd, myid, args, updatefields)

    def test_update_router_no_routes_with_add_route(self):
        # Update router: --no-routes with --route (mutually exclusive).
        resource = 'router'
        cmd = router.UpdateRouter(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid,
                '--no-routes',
                '--route',
                'destination=10.0.3.0/24,nexthop=10.0.0.10']
        exception = self.assertRaises(SystemExit,
                                      self._test_update_resource,
                                      resource, cmd, myid, args, None)
        self.assertEqual(2, exception.code)

    def test_delete_router(self):
        # Delete router: myid.
        resource = 'router'
        cmd = router.DeleteRouter(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)

    def test_show_router(self):
        # Show router: myid.
        resource = 'router'
        cmd = router.ShowRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, args,
                                 ['id', 'name'])

    def _test_add_remove_interface(self, action, mode, cmd, args):
        # Shared helper for add/remove-interface tests; mode selects whether
        # the interface is identified by port or by subnet.
        resource = 'router'
        subcmd = '%s_router_interface' % action
        if mode == 'port':
            body = {'port_id': 'portid'}
        else:
            body = {'subnet_id': 'subnetid'}
        if action == 'add':
            retval = {'subnet_id': 'subnetid', 'port_id': 'portid'}
        else:
            retval = None
        self._test_update_resource_action(resource, cmd, 'myid',
                                          subcmd, args,
                                          body, retval)

    def test_add_interface_compat(self):
        # Add interface to router: myid subnetid.
        cmd = router.AddInterfaceRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'subnetid']
        self._test_add_remove_interface('add', 'subnet', cmd, args)

    def test_add_interface_by_subnet(self):
        # Add interface to router: myid subnet=subnetid.
        cmd = router.AddInterfaceRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'subnet=subnetid']
        self._test_add_remove_interface('add', 'subnet', cmd, args)

    def test_add_interface_by_port(self):
        # Add interface to router: myid port=portid.
        cmd = router.AddInterfaceRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'port=portid']
        self._test_add_remove_interface('add', 'port', cmd, args)

    def test_del_interface_compat(self):
        # Delete interface from router: myid subnetid.
        cmd = router.RemoveInterfaceRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'subnetid']
        self._test_add_remove_interface('remove', 'subnet', cmd, args)

    def test_del_interface_by_subnet(self):
        # Delete interface from router: myid subnet=subnetid.
        cmd = router.RemoveInterfaceRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'subnet=subnetid']
        self._test_add_remove_interface('remove', 'subnet', cmd, args)

    def test_del_interface_by_port(self):
        # Delete interface from router: myid port=portid.
        cmd = router.RemoveInterfaceRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'port=portid']
        self._test_add_remove_interface('remove', 'port', cmd, args)

    def test_set_gateway(self):
        # Set external gateway for router: myid externalid.
        resource = 'router'
        cmd = router.SetGatewayRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'externalid']
        self._test_update_resource(resource, cmd, 'myid',
                                   args,
                                   {"external_gateway_info":
                                    {"network_id": "externalid"}}
                                   )

    def test_set_gateway_enable_snat(self):
        # enable external gateway for router: myid externalid.
        resource = 'router'
        cmd = router.SetGatewayRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'externalid', '--enable-snat']
        self._test_update_resource(resource, cmd, 'myid',
                                   args,
                                   {"external_gateway_info":
                                    {"network_id": "externalid",
                                     "enable_snat": True}}
                                   )

    def test_set_gateway_disable_snat(self):
        # set external gateway for router: myid externalid.
        resource = 'router'
        cmd = router.SetGatewayRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'externalid', '--disable-snat']
        self._test_update_resource(resource, cmd, 'myid',
                                   args,
                                   {"external_gateway_info":
                                    {"network_id": "externalid",
                                     "enable_snat": False}}
                                   )

    def test_set_gateway_external_ip(self):
        # set external gateway for router: myid externalid --fixed-ip ...
        resource = 'router'
        cmd = router.SetGatewayRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'externalid', '--fixed-ip', 'ip_address=10.0.0.2']
        self._test_update_resource(resource, cmd, 'myid',
                                   args,
                                   {"external_gateway_info":
                                    {"network_id": "externalid",
                                     "external_fixed_ips": [
                                         {"ip_address": "10.0.0.2"}]}}
                                   )

    def test_set_gateway_external_subnet(self):
        # set external gateway for router: myid externalid --fixed-ip ...
        resource = 'router'
        cmd = router.SetGatewayRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'externalid', '--fixed-ip', 'subnet_id=mysubnet']
        self._test_update_resource(resource, cmd, 'myid',
                                   args,
                                   {"external_gateway_info":
                                    {"network_id": "externalid",
                                     "external_fixed_ips": [
                                         {"subnet_id": "mysubnet"}]}}
                                   )

    def test_set_gateway_external_ip_and_subnet(self):
        # set external gateway for router: myid externalid --fixed-ip ...
        resource = 'router'
        cmd = router.SetGatewayRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['myid', 'externalid', '--fixed-ip',
                'ip_address=10.0.0.2,subnet_id=mysubnet']
        self._test_update_resource(resource, cmd, 'myid',
                                   args,
                                   {"external_gateway_info":
                                    {"network_id": "externalid",
                                     "external_fixed_ips": [
                                         {"subnet_id": "mysubnet",
                                          "ip_address": "10.0.0.2"}]}}
                                   )

    def test_remove_gateway(self):
        # Remove external gateway from router: externalid.
        resource = 'router'
        cmd = router.RemoveGatewayRouter(test_cli20.MyApp(sys.stdout), None)
        args = ['externalid']
        self._test_update_resource(resource, cmd, 'externalid',
                                   args, {"external_gateway_info": {}}
                                   )
| |
from corehq.apps.domain.models import Domain
from corehq.apps.sms.api import (
MessageMetadata,
add_msg_tags,
send_sms_to_verified_number,
log_sms_exception,
)
from corehq.apps.sms.messages import *
from corehq.apps.sms.util import format_message_list, get_date_format
from touchforms.formplayer.api import current_question
from corehq.apps.smsforms.app import (
_get_responses,
_responses_to_text,
)
from corehq.apps.smsforms.models import SQLXFormsSession
def form_session_handler(v, text, msg):
    """
    Answer the next question in the contact's open SQLXFormsSession with the
    inbound text.

    Returns True when the message was consumed (a session was answered, or
    multiple sessions were open and an error was sent back); returns False
    when no session is open so other handlers may process the message.
    """
    multiple, session = get_single_open_session_or_close_multiple(
        v.domain, v.owner_id)
    if multiple:
        send_sms_to_verified_number(v, get_message(MSG_MULTIPLE_SESSIONS, v))
        return True
    if not session:
        return False
    # Tag the inbound message with the session's workflow metadata.
    inbound_metadata = MessageMetadata(
        workflow=session.workflow,
        reminder_id=session.reminder_id,
        xforms_session_couch_id=session._id,
    )
    add_msg_tags(msg, inbound_metadata)
    try:
        answer_next_question(v, text, msg, session)
    except Exception:
        # touchforms hiccup: log it and tell the user the service is down.
        log_sms_exception(msg)
        send_sms_to_verified_number(v, get_message(MSG_TOUCHFORMS_DOWN, v))
    return True
def get_single_open_session_or_close_multiple(domain, contact_id):
    """
    Fetch the contact's open SQLXFormsSession.

    Returns a ``(multiple, session)`` tuple: ``multiple`` is True when more
    than one session was open (in which case all of them are closed and
    ``session`` is None); otherwise ``session`` is the single open session,
    or None when there is none.
    """
    sessions = SQLXFormsSession.get_all_open_sms_sessions(domain, contact_id)
    count = sessions.count()
    if count > 1:
        # Ambiguous state -- close everything rather than guess.
        for open_session in sessions:
            open_session.end(False)
            open_session.save()
        return (True, None)
    if count == 1:
        return (False, sessions[0])
    return (False, None)
def answer_next_question(v, text, msg, session):
resp = current_question(session.session_id)
event = resp.event
valid, text, error_msg = validate_answer(event, text, v)
# metadata to be applied to the reply message
outbound_metadata = MessageMetadata(
workflow=session.workflow,
reminder_id=session.reminder_id,
xforms_session_couch_id=session._id,
)
if valid:
responses = _get_responses(v.domain, v.owner_id, text,
yield_responses=True)
if has_invalid_response(responses):
mark_as_invalid_response(msg)
text_responses = _responses_to_text(responses)
if len(text_responses) > 0:
response_text = format_message_list(text_responses)
send_sms_to_verified_number(v, response_text,
metadata=outbound_metadata)
else:
mark_as_invalid_response(msg)
response_text = "%s %s" % (error_msg, event.text_prompt)
send_sms_to_verified_number(v, response_text,
metadata=outbound_metadata)
def validate_answer(event, text, v):
text = text.strip()
upper_text = text.upper()
valid = False
error_msg = ""
if text == "" and event._dict.get("required", False):
return (False, text, get_message(MSG_FIELD_REQUIRED, v))
# Validate select
if event.datatype == "select":
# Try to match on phrase (i.e., "Yes" or "No")
choices = format_choices(event._dict["choices"])
if upper_text in choices:
text = str(choices[upper_text])
valid = True
else:
try:
answer = int(text)
if answer >= 1 and answer <= len(event._dict["choices"]):
valid = True
else:
error_msg = get_message(MSG_CHOICE_OUT_OF_RANGE, v)
except ValueError:
error_msg = get_message(MSG_INVALID_CHOICE, v)
# Validate multiselect
elif event.datatype == "multiselect":
choices = format_choices(event._dict["choices"])
max_index = len(event._dict["choices"])
proposed_answers = text.split()
final_answers = {}
try:
for answer in proposed_answers:
upper_answer = answer.upper()
if upper_answer in choices:
final_answers[str(choices[upper_answer])] = ""
else:
int_answer = int(answer)
assert int_answer >= 1 and int_answer <= max_index
final_answers[str(int_answer)] = ""
text = " ".join(final_answers.keys())
valid = True
except Exception:
error_msg = get_message(MSG_INVALID_CHOICE, v)
# Validate int
elif event.datatype == "int":
try:
int(text)
valid = True
except ValueError:
error_msg = get_message(MSG_INVALID_INT, v)
# Validate float
elif event.datatype == "float":
try:
float(text)
valid = True
except ValueError:
error_msg = get_message(MSG_INVALID_FLOAT, v)
# Validate longint
elif event.datatype == "longint":
try:
long(text)
valid = True
except ValueError:
error_msg = get_message(MSG_INVALID_LONG, v)
# Validate date (Format: specified by Domain.sms_survey_date_format, default: YYYYMMDD)
elif event.datatype == "date":
domain_obj = Domain.get_by_name(v.domain)
df = get_date_format(domain_obj.sms_survey_date_format)
if df.is_valid(text):
try:
text = df.parse(text).strftime('%Y-%m-%d')
valid = True
except (ValueError, TypeError):
pass
if not valid:
error_msg = get_message(MSG_INVALID_DATE, v, context=(df.human_readable_format,))
# Validate time (Format: HHMM, 24-hour)
elif event.datatype == "time":
try:
assert len(text) == 4
hour = int(text[0:2])
minute = int(text[2:])
assert hour >= 0 and hour <= 23
assert minute >= 0 and minute <= 59
text = "%s:%s" % (hour, str(minute).zfill(2))
valid = True
except Exception:
error_msg = get_message(MSG_INVALID_TIME, v)
# Other question types pass
else:
valid = True
return (valid, text, error_msg)
def format_choices(choices_list):
choices = {}
for idx, choice in enumerate(choices_list):
choices[choice.strip().upper()] = idx + 1
return choices
def has_invalid_response(responses):
for r in responses:
if r.status == "validation-error":
return True
return False
def mark_as_invalid_response(msg):
msg.invalid_survey_response = True
msg.save()
| |
__author__ = 'jgrant'
from ogre_parse.basemodel import *
from pyparsing import ParseException
# TODO: for now, __repr__ = __str__, but I want to improve this depending on where __repr__ is used.
# http://stackoverflow.com/questions/1436703/difference-between-str-and-repr-in-python
# should be hooked up to a 'subreader.ReadTextureUnit' instance
class MTextureUnit(object):
def __init__(self, tokens=None):
self.name = ''
self.resource_type = 'texture'
self.resource_name = ''
self.image_format = ''
# self.cubic_images = {'front': '', 'back': '', 'left': '', 'right': '', 'up': '', 'down': ''}
# self.cubic_address_mode = ''
self.texture_alias = ''
self.tex_coord_set = int(0)
self.tex_address_mode = 'wrap'
self.tex_border_colour = Color()
self.filtering = 'linear linear point'
self.scale = array.array('f', [1.0, 1.0])
self.colour_op = 'modulate'
self.colour_op_ex = ''
self.colour_op_multipass_fallback = ''
self.binding_type = 'fragment'
self.env_map = 'off'
self.content_type = 'name'
self.indent = ''
if tokens:
tu = tokens.texture_unit
if tu.name:
self.name = tu.name
if tu.required:
# assume it is 'texture' until I support 'anim_texture' and 'cubic_texture'
self.resource_type = tu.required.resource_type
if tu.required.resource_properties:
if tu.required.resource_properties.name:
self.resource_name = tu.required.resource_properties.name
# an optional sub-property on the required property
if tu.required.resource_properties.format:
self.image_format = tu.required.resource_properties.format
# an optional sub-property on the required property
# if tu.required.resource_properties.type:
# # should be one of: 1d, 2d, 3d, cubic
# self.resource_texture_type = tu.required.resource_properties.type
else:
# TODO: throw exception because the resource name is required.
pass
if tu.texture_alias:
self.texture_alias = tu.texture_alias[0]
if tu.tex_coord_set:
self.tex_coord_set = tu.tex_coord_set[0]
if tu.tex_address_mode:
self.tex_address_mode = ' '.join(tu.tex_address_mode)
if tu.tex_border_colour:
self.tex_border_colour = tu.tex_border_colour
if tu.filtering:
self.filtering = ' '.join(tu.filtering[0])
if tu.scale:
self.scale[0] = tu.scale.x
self.scale[1] = tu.scale.y
if tu.colour_op:
self.colour_op = tu.colour_op[0]
if tu.colour_op_ex:
# tu.colour_op_ex can contain mixed types, e.g. string and float
# so we just store a string representation until there is a requirement to use
# individual elements of the property.
self.colour_op_ex = ' '.join(str(x) for x in tu.colour_op_ex[0].asList())
if tu.colour_op_multipass_fallback:
self.colour_op_multipass_fallback = ' '.join(tu.colour_op_multipass_fallback[0].asList())
if tu.binding_type:
self.binding_type = tu.binding_type[0]
if tu.env_map:
self.env_map = tu.env_map[0]
if tu.content_type:
self.content_type = ' '.join(str(x) for x in tu.content_type.asList())
def __str__(self):
loc_indent = 4*' '
repr = '\n' + self.indent + 'texture_unit' + ((' ' + self.name) if self.name else '')
repr += '\n' + self.indent + '{'
if self.texture_alias:
repr += '\n' + self.indent + loc_indent + 'texture_alias ' + self.texture_alias
# check the resource type
if self.resource_type == 'texture':
repr += '\n' + self.indent + loc_indent + self.resource_type + ' ' + self.resource_name
# (self.resource_texture_type if (self.resource_texture_type!='2d') else '')
elif self.resource_type == 'cubic_texture':
pass
elif self.resource_type == 'anim_texture':
pass
if self.tex_coord_set != int(0):
repr += '\n' + self.indent + loc_indent + 'tex_coord_set ' + str(self.tex_coord_set)
if self.tex_address_mode != 'wrap':
repr += '\n' + self.indent + loc_indent + 'tex_address_mode ' + self.tex_address_mode
if self.tex_border_colour != Color(vals=[0.0, 0.0, 0.0, 1.0]):
repr += '\n' + self.indent + loc_indent + 'tex_border_colour ' + str(self.tex_border_colour) + '\n'
if (self.filtering != 'bilinear') and (self.filtering != 'linear linear point'):
repr += '\n' + self.indent + loc_indent + 'filtering ' + self.filtering
if self.colour_op_ex:
repr += '\n' + self.indent + loc_indent + 'colour_op_ex ' + self.colour_op_ex
if self.colour_op_multipass_fallback:
repr += '\n' + self.indent + loc_indent + 'colour_op_multipass_fallback ' + self.colour_op_multipass_fallback
if not float_eq(self.scale[0], 1.0) or not float_eq(self.scale[0], 1.0):
repr += '\n' + self.indent + loc_indent + 'scale ' + str(self.scale[0]) + str(self.scale[1])
if self.colour_op != 'modulate':
repr += '\n' + self.indent + loc_indent + 'colour_op' + self.colour_op
if self.env_map != 'off':
repr += '\n' + self.indent + loc_indent + 'env_map ' + self.env_map
if self.binding_type != 'fragment':
repr += '\n' + self.indent + loc_indent + 'binding_type ' + self.binding_type
if self.content_type != 'name':
repr += '\n' + self.indent + loc_indent + 'content_type ' + self.content_type
repr += '\n' + self.indent + '}'
return repr
__repr__ = __str__
# should be hooked up to a 'subreader.ReadShaderReference' instance
class MShaderRef(object):
def __init__(self, tokens=None):
self.stage = ''
self.resource_name = ''
self.param_indexed = {}
self.param_indexed_auto = {}
self.param_named = {}
self.param_named_auto = {}
self.param_shared_ref = {}
self.indent = ''
if tokens:
shader = tokens.shader_ref
if shader.stage:
self.stage = shader.stage
else:
# how to throw exception?
# http://pyparsing.wikispaces.com/share/view/5613328
raise ParseException('ogre_parse::MShaderRef, missing shader stage, e.g. vertex_program_ref')
if shader.resource_name:
self.resource_name = shader.resource_name
else:
# how to throw exception?
# http://pyparsing.wikispaces.com/share/view/5613328
raise ParseException('ogre_parse::MShaderRef, missing shader resource name, e.g. myPhongShader')
if shader.param_named_auto:
for k, val in shader.param_named_auto.items():
self.param_named_auto.update({k: ' '.join(val)})
if shader.param_named:
for k, val in shader.param_named.items():
self.param_named.update({k: ' '.join(val)})
def __str__(self):
loc_indent = 4*' '
repr = ''
repr += '\n' + self.indent + self.stage + ' ' + self.resource_name
repr += '\n' + self.indent + '{'
# TODO: iterating over a dictionary produces results in a random order which can cause unit tests to fail.
# see unit test test_shaderref_param
# show 'param_named_auto'
for k, v in self.param_named_auto.items():
repr += '\n' + self.indent + loc_indent + 'param_named_auto ' + str(k) + ' ' + str(v)
# show 'param_named'
for k, v in self.param_named.items():
repr += '\n' + self.indent + loc_indent + 'param_named ' + str(k) + ' ' + str(v)
repr += '\n' + self.indent + '}'
return repr
__repr__ = __str__
# should be hooked up to a 'subreader.ReadPass' instance
class MPass(object):
def __init__(self, tokens=None):
self.name = ''
# color
self.ambient = Color(vals=[1, 1, 1, 1])
self.diffuse = Color(vals=[1, 1, 1, 1])
self.emissive = Color(vals=[0, 0, 0, 0])
self.specular = Color(vals=[0, 0, 0, 0])
self.shininess = float(0.0)
# blend
self.scene_blend = 'one zero'
self.separate_scene_blend = ''
self.scene_blend_op = 'add'
self.separate_scene_blend_op = 'add add'
# depth
self.depth_check = 'on'
self.depth_write = 'on'
self.depth_func = 'less_equal'
self.iteration_depth_bias = 0.0
self.depth_bias_constant = 0.0
self.depth_bias_slopescale = 0.0
# alpha
self.alpha_rejection_function = 'always_pass'
self.alpha_rejection_threshold = float(0.0)
self.alpha_to_coverage = 'off'
# light scissor
self.light_scissor = 'off'
self.light_clip_planes = 'off'
# other
self.illumination_stage = 'none'
self.normalise_normals = 'off'
self.transparent_sorting = 'on'
# cull
self.cull_hardware = 'clockwise'
self.cull_software = 'back'
# other
self.lighting = 'on'
self.shading = 'gouraud'
self.polygon_mode = 'solid'
self.polygon_mode_overrideable = 'true'
self.fog_override = 'false'
self.colour_write = 'on'
self.start_light = int(0)
self.max_lights = int(8)
# iteration
self.iteration = 'once'
# points
self.point_size = float(1.0)
self.point_sprites = 'off'
self.point_size_attenuation = 'off'
self.point_size_min = float(0.0)
self.point_size_max = float(1.0)
# --- objects ---
self.texture_units = []
self.shaders = []
# --- for text output formatting ---
self.indent = ''
# grab parsed results
if tokens:
if tokens.mpass.name:
self.name = tokens.mpass.name
# --- color
if tokens.mpass.ambient:
self.ambient = tokens.mpass.ambient[0]
if tokens.mpass.diffuse:
self.diffuse = tokens.mpass.diffuse[0]
if tokens.mpass.emissive:
self.emissive = tokens.mpass.emissive[0]
if tokens.mpass.specular:
self.specular = tokens.mpass.specular[0].color[0]
self.shininess = tokens.mpass.specular[0].shininess[0]
# --- blend
if tokens.mpass.scene_blend:
self.scene_blend = ' '.join(tokens.mpass.scene_blend)
if tokens.mpass.scene_blend_op:
self.scene_blend_op = ' '.join(tokens.mpass.scene_blend_op)
if tokens.mpass.separate_scene_blend:
self.separate_scene_blend = ' '.join(tokens.mpass.separate_scene_blend)
if tokens.mpass.separate_scene_blend_op:
self.separate_scene_blend_op = ' '.join(tokens.mpass.separate_scene_blend_op)
# --- depth
if tokens.mpass.depth_check:
self.depth_check = ' '.join(tokens.mpass.depth_check)
if tokens.mpass.depth_write:
self.depth_write = ' '.join(tokens.mpass.depth_write)
if tokens.mpass.depth_func:
self.depth_func = ' '.join(tokens.mpass.depth_func)
if tokens.mpass.depth_bias:
if tokens.mpass.depth_bias.constant:
self.depth_bias_constant = tokens.mpass.depth_bias.constant
if tokens.mpass.depth_bias.slopescale:
self.depth_bias_slopescale = tokens.mpass.depth_bias.slopescale
if tokens.mpass.iteration_depth_bias:
self.iteration_depth_bias = tokens.mpass.iteration_depth_bias
# --- alpha
if tokens.mpass.alpha_rejection:
self.alpha_rejection_function = tokens.mpass.alpha_rejection.function
self.alpha_rejection_threshold = tokens.mpass.alpha_rejection.threshold
if tokens.mpass.alpha_to_coverage:
self.alpha_to_coverage = ' '.join(tokens.mpass.alpha_to_coverage)
# --- light scissor
if tokens.mpass.light_scissor:
self.light_scissor = ' '.join(tokens.mpass.light_scissor)
if tokens.mpass.light_clip_planes:
self.light_clip_planes = ' '.join(tokens.mpass.light_clip_planes)
# --- other
if tokens.mpass.illumination_stage:
self.illumination_stage = ' '.join(tokens.mpass.illumination_stage)
if tokens.mpass.normalise_normals:
self.normalise_normals = ' '.join(tokens.mpass.normalise_normals)
if tokens.mpass.transparent_sorting:
self.transparent_sorting = ' '.join(tokens.mpass.transparent_sorting)
# --- cull
if tokens.mpass.cull_hardware:
self.cull_hardware = ' '.join(tokens.mpass.cull_hardware)
if tokens.mpass.cull_software:
self.cull_software = ' '.join(tokens.mpass.cull_software)
# --- other
if tokens.mpass.lighting:
self.lighting = ' '.join(tokens.mpass.lighting)
if tokens.mpass.shading:
self.shading = ' '.join(tokens.mpass.shading)
if tokens.mpass.polygon_mode:
self.polygon_mode = ' '.join(tokens.mpass.polygon_mode)
if tokens.mpass.polygon_mode_overrideable:
self.polygon_mode_overrideable = ' '.join(tokens.mpass.polygon_mode_overrideable)
if tokens.mpass.fog_override:
self.fog_override = ' '.join(tokens.mpass.fog_override)
if tokens.mpass.colour_write:
self.colour_write = ' '.join(tokens.mpass.colour_write)
if tokens.mpass.start_light:
self.start_light = tokens.mpass.start_light[0]
if tokens.mpass.max_lights:
self.max_lights = tokens.mpass.max_lights[0]
if tokens.mpass.iteration:
self.iteration = ' '.join(tokens.mpass.iteration)
# --- point
if tokens.mpass.point_size:
self.point_size = tokens.mpass.point_size[0]
if tokens.mpass.point_sprites:
self.point_sprites = ' '.join(tokens.mpass.point_sprites)
if tokens.mpass.point_size_attenuation:
self.point_size_attenuation = ' '.join(tokens.mpass.point_size_attenuation)
if tokens.mpass.point_size_min:
self.point_size_min = tokens.mpass.point_size_min[0]
if tokens.mpass.point_size_max:
self.point_size_max = tokens.mpass.point_size_max[0]
# --- objects
if tokens.mpass.texture_units:
for tu in tokens.mpass.texture_units:
self.texture_units.append( tu )
if tokens.mpass.shaders:
for sh in tokens.mpass.shaders:
self.shaders.append( sh )
def __str__(self):
repr = '\n' + self.indent + 'pass' + ((' ' + self.name) if self.name else '')
repr += '\n' + self.indent + '{'
loc_indent = 4*' '
if self.ambient != Color(vals=[1, 1, 1, 1]):
repr += '\n' + self.indent + loc_indent + 'ambient ' + str(self.ambient)
if self.diffuse != Color(vals=[1, 1, 1, 1]):
repr += '\n' + self.indent + loc_indent + 'diffuse ' + str(self.diffuse)
if self.emissive != Color(vals=[0, 0, 0, 0]):
repr += '\n' + self.indent + loc_indent + 'emissive ' + str(self.emissive)
if (self.specular != Color(vals=[0, 0, 0, 0])) or (self.shininess != 0.0):
fmt = '{0:.6f}'
repr += '\n' + self.indent + loc_indent + 'specular ' + str(self.specular)\
+ ' ' + fmt.format(self.shininess).rstrip('0').rstrip('.')
if self.lighting != 'on':
repr += '\n' + self.indent + loc_indent + 'lighting ' + self.lighting
if self.shading != 'gouraud':
repr += '\n' + self.indent + loc_indent + 'shading ' + self.shading
if self.polygon_mode != 'solid':
repr += '\n' + self.indent + loc_indent + 'polygon_mode ' + self.polygon_mode
if self.polygon_mode_overrideable != 'true':
repr += '\n' + self.indent + loc_indent + 'polygon_mode_overrideable ' + self.polygon_mode_overrideable
if self.fog_override != 'false':
repr += '\n' + self.indent + loc_indent + 'fog_override ' + self.fog_override
if self.alpha_rejection_function != 'always_pass':
repr += '\n' + self.indent + loc_indent + 'alpha_rejection ' + self.alpha_rejection_function + ' ' + str(int(self.alpha_rejection_threshold))
if self.alpha_to_coverage != 'off':
repr += '\n' + self.indent + loc_indent + 'alpha_to_coverage ' + self.alpha_to_coverage
if self.scene_blend != 'one zero':
repr += '\n' + self.indent + loc_indent + 'scene_blend ' + self.scene_blend
if self.depth_write != 'on':
repr += '\n' + self.indent + loc_indent + 'depth_write ' + self.depth_write
if self.depth_check != 'on':
repr += '\n' + self.indent + loc_indent + 'depth_check ' + self.depth_check
if self.depth_func != 'less_equal':
repr += '\n' + self.indent + loc_indent + 'depth_func ' + self.depth_func
if not float_eq(0.0, self.depth_bias_constant) or not float_eq(0.0, self.depth_bias_slopescale):
repr += '\n' + self.indent + loc_indent + 'depth_bias '\
+ str(self.depth_bias_constant)\
+ (str(self.depth_bias_slopescale) if not float_eq(0.0, self.depth_bias_slopescale) else '')
if self.cull_hardware != 'clockwise':
repr += '\n' + self.indent + loc_indent + 'cull_hardware ' + self.cull_hardware
if self.cull_software != 'back':
repr += '\n' + self.indent + loc_indent + 'cull_software ' + self.cull_software
if self.transparent_sorting != 'on':
repr += '\n' + self.indent + loc_indent + 'transparent_sorting ' + self.transparent_sorting
if self.illumination_stage != 'none':
repr += '\n' + self.indent + loc_indent + 'illumination_stage ' + self.illumination_stage
if self.iteration != 'once':
repr += '\n' + self.indent + loc_indent + 'iteration ' + self.iteration
for tu in self.texture_units:
tu.indent = self.indent + loc_indent
repr += '\n' + str(tu)
for sh in self.shaders:
sh.indent = self.indent + loc_indent
repr += '\n' + str(sh)
repr += '\n' + self.indent + '}'
return repr
__repr__ = __str__
# should be hooked up to a 'subreader.ReadTechnique' instance
class MTechnique(object):
def __init__(self, tokens=None):
self.name = ''
self.scheme = ''
self.lod_index = int(0)
self.shadow_caster_material = ''
self.shadow_receiver_material = ''
self.gpu_vendor_rule = []
self.gpu_device_rule = []
self.passes = []
self.indent = ''
if tokens:
tech = tokens.technique
if tech.name:
self.name = tech.name
if tech.scheme:
self.scheme = ' '.join(tech.scheme)
if tech.lod_index:
self.lod_index = tech.lod_index[0]
if tech.shadow_caster_material:
self.shadow_caster_material = ' '.join(tech.shadow_caster_material)
if tech.shadow_receiver_material:
self.shadow_receiver_material = ' '.join(tech.shadow_receiver_material)
if tech.gpu_vendor_rules:
for vr in tech.gpu_vendor_rules:
self.gpu_vendor_rule.append(vr)
if tech.gpu_device_rules:
for dr in tech.gpu_device_rules:
self.gpu_device_rule.append(dr)
if tech.passes:
for p in tech.passes:
self.passes.append(p)
def __str__(self):
loc_indent = 4*' '
repr = ''
repr += '\n' + self.indent + 'technique' + ((' ' + self.name) if self.name else '')
repr += '\n' + self.indent + '{'
if self.scheme:
repr += '\n' + self.indent + loc_indent + 'scheme ' + self.scheme
if self.lod_index != 0:
repr += '\n' + self.indent + loc_indent + 'lod_index ' + str(self.lod_index)
if self.shadow_caster_material:
repr += '\n' + self.indent + loc_indent + 'shadow_caster_material ' + self.shadow_caster_material
if self.shadow_receiver_material:
repr += '\n' + self.indent + loc_indent + 'shadow_receiver_material ' + self.shadow_receiver_material
for pi in range(len(self.passes)):
p = self.passes[pi]
p.indent = self.indent + loc_indent
repr += str(p) + ('\n' if (pi < (len(self.passes)-1)) else '')
repr += '\n' + self.indent + '}'
return repr
__repr__ = __str__
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _microTVM-with-TFLite:
microTVM with TFLite Models
===========================
**Author**: `Tom Gall <https://github.com/tom-gall>`_
This tutorial is an introduction to working with microTVM and a TFLite
model with Relay.
"""
######################################################################
# .. note::
# If you want to run this tutorial on the microTVM Reference VM, download the Jupyter
# notebook using the link at the bottom of this page and save it into the TVM directory. Then:
#
# #. Login to the reference VM with a modified ``vagrant ssh`` command:
#
# ``$ vagrant ssh -- -L8888:localhost:8888``
#
# #. Install jupyter: ``pip install jupyterlab``
# #. ``cd`` to the TVM directory.
# #. Install tflite: poetry install -E importer-tflite
# #. Launch Jupyter Notebook: ``jupyter notebook``
# #. Copy the localhost URL displayed, and paste it into your browser.
# #. Navigate to saved Jupyter Notebook (``.ipynb`` file).
#
#
# Setup
# -----
#
# Install TFLite
# ^^^^^^^^^^^^^^
#
# To get started, TFLite package needs to be installed as prerequisite. You can do this in two ways:
#
# 1. Install tflite with ``pip``
#
# .. code-block:: bash
#
# pip install tflite=2.1.0 --user
#
# 2. Generate the TFLite package yourself. The steps are the following:
#
# Get the flatc compiler.
# Please refer to https://github.com/google/flatbuffers for details
# and make sure it is properly installed.
#
# .. code-block:: bash
#
# flatc --version
#
# Get the TFLite schema.
#
# .. code-block:: bash
#
# wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs
#
# Generate TFLite package.
#
# .. code-block:: bash
#
# flatc --python schema.fbs
#
# Add the current folder (which contains generated tflite module) to PYTHONPATH.
#
# .. code-block:: bash
#
# export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)
#
# To validate that the TFLite package was installed successfully, ``python -c "import tflite"``
#
# Install Zephyr (physical hardware only)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# When running this tutorial with a host simulation (the default), you can use the host ``gcc`` to
# build a firmware image that simulates the device. When compiling to run on physical hardware, you
# need to install a *toolchain* plus some target-specific dependencies. microTVM allows you to
# supply any compiler and runtime that can launch the TVM RPC server, but to get started, this
# tutorial relies on the Zephyr RTOS to provide these pieces.
#
# You can install Zephyr by following the
# `Installation Instructions <https://docs.zephyrproject.org/latest/getting_started/index.html>`_.
#
# Aside: Recreating your own Pre-Trained TFLite model
# The tutorial downloads a pretrained TFLite model. When working with microcontrollers
# you need to be mindful these are highly resource constrained devices as such standard
# models like MobileNet may not fit into their modest memory.
#
# For this tutorial, we'll make use of one of the TF Micro example models.
#
# If you wish to replicate the training steps see:
# https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/hello_world/train
#
# .. note::
#
# If you accidentally download the example pretrained model from:
#
# ``wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/micro/hello_world_2020_04_13.zip``
#
# this will fail due to an unimplemented opcode (114)
#
# Load and prepare the Pre-Trained Model
# --------------------------------------
#
# Load the pretrained TFLite model from a file in your current
# directory into a buffer
import os
import numpy as np
import tvm
from tvm.contrib.download import download_testdata
from tvm import relay
model_url = "https://people.linaro.org/~tom.gall/sine_model.tflite"
model_file = "sine_model.tflite"
model_path = download_testdata(model_url, model_file, module="data")
tflite_model_buf = open(model_path, "rb").read()
######################################################################
# Using the buffer, transform into a tflite model python object
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
######################################################################
# Print out the version of the model
version = tflite_model.Version()
print("Model Version: " + str(version))
######################################################################
# Parse the python model object to convert it into a relay module
# and weights.
# It is important to note that the input tensor name must match what
# is contained in the model.
#
# If you are unsure what that might be, this can be discovered by using
# the ``visualize.py`` script within the Tensorflow project.
# See `How do I inspect a .tflite file? <https://www.tensorflow.org/lite/guide/faq>`_
input_tensor = "dense_4_input"
input_shape = (1,)
input_dtype = "float32"
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}
)
######################################################################
# Defining the target
# -------------------
#
# Now we create a build config for relay, turning off two options and then calling relay.build which
# will result in a C source file for the selected TARGET. When running on a simulated target of the
# same architecture as the host (where this Python script is executed) choose "host" below for the
# TARGET, the C Runtime as the RUNTIME and a proper board/VM to run it (Zephyr will create the right
# QEMU VM based on BOARD. In the example below the x86 arch is selected and a x86 VM is picked up accordingly:
#
RUNTIME = tvm.relay.backend.Runtime("crt", {"system-lib": True})
TARGET = tvm.target.target.micro("host")
BOARD = "qemu_x86"
#
# Compiling for physical hardware
# When running on physical hardware, choose a TARGET and a BOARD that describe the hardware. The
# STM32F746 Nucleo target and board is chosen in the example below. Another option would be to
# choose the STM32F746 Discovery board instead. Since that board has the same MCU as the Nucleo
# board but a couple of wirings and configs differ, it's necessary to select the "stm32f746g_disco"
# board to generated the right firmware image.
#
# TARGET = tvm.target.target.micro("stm32f746xx")
# BOARD = "nucleo_f746zg" # or "stm32f746g_disco#"
#
# For some boards, Zephyr runs them emulated by default, using QEMU. For example, below is the
# TARGET and BOARD used to build a microTVM firmware for the mps2-an521 board. Since that board
# runs emulated by default on Zephyr the suffix "-qemu" is added to the board name to inform
# microTVM that the QEMU transporter must be used to communicate with the board. If the board name
# already has the prefix "qemu_", like "qemu_x86", then it's not necessary to add that suffix.
#
# TARGET = tvm.target.target.micro("mps2_an521")
# BOARD = "mps2_an521-qemu"
######################################################################
# Now, compile the model for the target:
with tvm.transform.PassContext(
opt_level=3, config={"tir.disable_vectorize": True}, disabled_pass=["AlterOpLayout"]
):
module = relay.build(mod, target=TARGET, runtime=RUNTIME, params=params)
# Inspecting the compilation output
# ---------------------------------
#
# The compilation process has produced some C code implementing the operators in this graph. We
# can inspect it by printing the CSourceModule contents (for the purposes of this tutorial, let's
# just print the first 10 lines):
c_source_module = module.get_lib().imported_modules[0]
assert c_source_module.type_key == "c", "tutorial is broken"
c_source_code = c_source_module.get_source()
first_few_lines = c_source_code.split("\n")[:10]
assert any(
l.startswith("TVM_DLL int32_t tvmgen_default_") for l in first_few_lines
), f"tutorial is broken: {first_few_lines!r}"
print("\n".join(first_few_lines))
# Compiling the generated code
# ----------------------------
#
# Now we need to incorporate the generated C code into a project that allows us to run inference on the
# device. The simplest way to do this is to integrate it yourself, using microTVM's standard output format
# (:doc:`Model Library Format` </dev/model_library_format>`). This is a tarball with a standard layout:
# Get a temporary path where we can store the tarball (since this is running as a tutorial).
import tempfile
fd, model_library_format_tar_path = tempfile.mkstemp()
os.close(fd)
os.unlink(model_library_format_tar_path)
tvm.micro.export_model_library_format(module, model_library_format_tar_path)
import tarfile
with tarfile.open(model_library_format_tar_path, "r:*") as tar_f:
print("\n".join(f" - {m.name}" for m in tar_f.getmembers()))
# Cleanup for tutorial:
os.unlink(model_library_format_tar_path)
# TVM also provides a standard way for embedded platforms to automatically generate a standalone
# project, compile and flash it to a target, and communicate with it using the standard TVM RPC
# protocol. The Model Library Format serves as the model input to this process. When embedded
# platforms provide such an integration, they can be used directly by TVM for both host-driven
# inference and autotuning . This integration is provided by the
# `microTVM Project API` <https://github.com/apache/tvm-rfcs/blob/main/rfcs/0008-microtvm-project-api.md>_,
#
# Embedded platforms need to provide a Template Project containing a microTVM API Server (typically,
# this lives in a file ``microtvm_api_server.py`` in the root directory). Let's use the example ``host``
# project in this tutorial, which simulates the device using a POSIX subprocess and pipes:
import subprocess
import pathlib
template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt"))
project_options = {} # You can use options to provide platform-specific options through TVM.
# Compiling for physical hardware (or an emulated board, like the mps_an521)
# --------------------------------------------------------------------------
# For physical hardware, you can try out the Zephyr platform by using a different template project
# and options:
#
# template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
# project_options = {"project_type": "host_driven", zephyr_board": "nucleo_f746zg"}}
# Create a temporary directory
import tvm.contrib.utils
temp_dir = tvm.contrib.utils.tempdir()
generated_project_dir = temp_dir / "generated-project"
generated_project = tvm.micro.generate_project(
template_project_path, module, generated_project_dir, project_options
)
# Build and flash the project
generated_project.build()
generated_project.flash()
######################################################################
# Next, establish a session with the simulated device and run the
# computation. The `with session` line would typically flash an attached
# microcontroller, but in this tutorial, it simply launches a subprocess
# to stand in for an attached microcontroller.
with tvm.micro.Session(transport_context_manager=generated_project.transport()) as session:
graph_mod = tvm.micro.create_local_graph_executor(
module.get_graph_json(), session.get_system_lib(), session.device
)
# Set the model parameters using the lowered parameters produced by `relay.build`.
graph_mod.set_input(**module.get_params())
# The model consumes a single float32 value and returns a predicted sine value. To pass the
# input value we construct a tvm.nd.array object with a single contrived number as input. For
# this model values of 0 to 2Pi are acceptable.
graph_mod.set_input(input_tensor, tvm.nd.array(np.array([0.5], dtype="float32")))
graph_mod.run()
tvm_output = graph_mod.get_output(0).numpy()
print("result is: " + str(tvm_output))
| |
"""Fake filesystem for easy mocking. Usage
# Setup
fs = FakeFilesystem()
fs.add_file('file.txt', data='contents goes here')
with fs.monkey.patch():
# Production code
with open('file.txt') as f:
print(f.read())
"""
import io
import os
import tempfile
from typing import Callable, Dict, List, Union
import uuid
from unittest.mock import patch
class Monkey(object):
    """Installs unittest.mock patches that reroute filesystem APIs to a FakeFilesystem."""

    def __init__(self, fs) -> None:
        self.fs = fs
        self.original = {}  # type: Dict[str, Callable]
        self.patches = []  # type: List[patch]

    def patch(self):
        """Patches relevant functions in builtins, os, and shutil"""
        fake = self.fs
        # (target dotted name, fake replacement) pairs, in patch order.
        targets = [
            ('builtins.open', fake.open),
            ('os.path.exists', fake.exists),
            ('os.path.isfile', fake.isfile),
            ('os.path.getsize', fake.getsize),
            ('os.path.isdir', fake.isdir),
            ('shutil.copy', fake.copy),
            ('shutil.chown', fake.chown),
            ('shutil.rmtree', fake.rmtree),
            ('os.rename', fake.rename),
            ('os.mkdir', fake.mkdir),
            ('os.makedirs', fake.makedirs),
            ('os.remove', fake.remove),
            ('os.stat', fake.stat),
            ('os.listdir', fake.listdir),
        ]
        for name, replacement in targets:
            self.patches.append(patch(name, replacement))
        self.patches.append(patch('tempfile.TemporaryDirectory', FakedTemporaryDirectory))
        return self

    def __enter__(self):
        for active in self.patches:
            active.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        for active in self.patches:
            active.stop()
class InspectableBytesIO(io.BytesIO):
    """BytesIO that hands its final contents to a callback when closed."""

    def __init__(self, onclose=None, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Called with the buffer's full contents just before closing.
        self.onclose = onclose

    def close(self) -> None:
        callback = self.onclose
        if callback:
            callback(self.getvalue())
        super().close()
class FakeFile(object):
    """A single fake file: nothing but an immutable-by-convention bytes payload."""

    def __init__(self, data: bytes) -> None:
        # Raw file contents.
        self.data = data
class FakeStat(object):
    """Minimal stand-in for an os.stat result; only st_mtime is modelled."""

    def __init__(self, path):
        # Modification time is always reported as the epoch.
        self.st_mtime = 0
class FakeFilesystem(object):
    """In-memory filesystem double.

    Files are stored in a flat path -> FakeFile mapping; directories are
    implicit (a path "is a directory" if any stored file path starts with
    it, and ``makedirs`` drops a ``..mark`` file so empty directories are
    visible). ``self.monkey`` patches the real filesystem APIs to point here.
    """

    def __init__(self) -> None:
        self.files = {}  # type: Dict[str, FakeFile]
        self.monkey = Monkey(self)

    # Setup functions
    def add_file(self, path: str, data: str) -> None:
        """Create a fake file at `path` whose contents are `data` (UTF-8 encoded)."""
        p = os.path.normpath(path)
        self.files[p] = FakeFile(data.encode('utf-8'))

    # Assert functions
    def content_for(self, path: str):
        """Return the raw bytes stored at `path` (path must already be normalized)."""
        return self.files[path].data

    # Fake functions
    def open(self, path: str, mode: str = 'r') -> Union[io.BytesIO, io.TextIOWrapper]:
        """Replacement for builtins.open supporting 'r', 'w' and 'a', text or binary."""
        p = os.path.normpath(path)
        if mode.startswith('r'):
            if p in self.files:
                data = io.BytesIO(self.files[p].data)
                if 'b' in mode:
                    return data
                return io.TextIOWrapper(data)
            raise FileNotFoundError("[Errno 2] No such file or directory: '{}'".format(path))
        if mode.startswith('w'):
            # Contents are captured (overwriting) when the handle is closed.
            def store_file(content):
                self.files[p] = FakeFile(content)

            f = InspectableBytesIO(store_file)
            if 'b' in mode:
                return f
            return io.TextIOWrapper(f)
        if mode.startswith('a'):
            # Appended contents are merged with any existing data on close.
            def append_file(content):
                if p in self.files:
                    self.files[p] = FakeFile(self.files[p].data + content)
                else:
                    self.files[p] = FakeFile(content)

            f = InspectableBytesIO(append_file)
            if 'b' in mode:
                return f
            return io.TextIOWrapper(f)
        raise ValueError("invalid mode: '{}'".format(mode))

    def exists(self, path: str) -> bool:
        """Replacement for os.path.exists (only files are modelled)."""
        p = os.path.normpath(path)
        return p in self.files

    def copy(self, source: str, target: str) -> None:
        """Replacement for shutil.copy; the FakeFile object is shared, not duplicated."""
        s = os.path.normpath(source)
        t = os.path.normpath(target)
        if s not in self.files:
            raise IOError("Could not copy '{}' to '{}'".format(s, t))
        self.files[t] = self.files[s]

    def chown(self, path: str, user: str, group: str = None):
        """Replacement for shutil.chown; ownership itself is not modelled."""
        p = os.path.normpath(path)
        if p not in self.files:
            raise FileNotFoundError("[Errno 2] No such file or directory: '{}'".format(path))

    def rmtree(self, path):
        """Replacement for shutil.rmtree.

        NOTE(review): plain prefix matching also removes sibling paths that
        merely share the prefix (e.g. 'dir2' when removing 'dir') — confirm
        this is acceptable for the tests using it.
        """
        p = os.path.normpath(path)
        self.files = {key: value for key, value in self.files.items() if not key.startswith(p)}

    def rename(self, source: str, target: str) -> None:
        """Replacement for os.rename."""
        s = os.path.normpath(source)
        t = os.path.normpath(target)
        if s not in self.files:
            raise FileNotFoundError("[Errno 2] No such file or directory: '{}' -> '{}'".format(source, target))
        self.files[t] = self.files.pop(s)

    def mkdir(self, path: str, mode: int = 0o777) -> None:
        """Replacement for os.mkdir."""
        self.makedirs(path, mode)

    def makedirs(self, path: str, mode: int = 0o777, exist_ok: bool = False) -> None:
        """Replacement for os.makedirs.

        Bug fix: the keyword is ``exist_ok`` (matching the real os.makedirs
        signature), not ``exists_ok`` — code running under the patch that
        calls ``os.makedirs(p, exist_ok=True)`` would otherwise raise
        TypeError.
        """
        # TODO(niko or samuel): Proper directory support
        # Only files exists in the fake fs
        p = os.path.normpath(path)
        # Create empty marker file in directory
        self.files[os.path.join(p, '..mark')] = FakeFile(b'')

    def isfile(self, path):
        """Replacement for os.path.isfile."""
        p = os.path.normpath(path)
        return p in self.files

    def getsize(self, path):
        """Replacement for os.path.getsize."""
        p = os.path.normpath(path)
        if p not in self.files:
            raise FileNotFoundError("[Errno 2] No such file or directory: '{}'".format(path))
        return len(self.files[p].data)

    def isdir(self, path):
        """Replacement for os.path.isdir: true when files exist under `path`."""
        # TODO(niko or samuel): Proper directory support
        p = os.path.normpath(path)
        if p in self.files:
            return False
        return any(file.startswith(p) for file in self.files.keys())

    def remove(self, path):
        """Replacement for os.remove."""
        p = os.path.normpath(path)
        if p not in self.files:
            raise FileNotFoundError("[Errno 2] No such file or directory: '{}'".format(path))
        del self.files[p]

    def stat(self, path):
        """Replacement for os.stat; returns a minimal FakeStat."""
        p = os.path.normpath(path)
        if p not in self.files:
            raise FileNotFoundError("[Errno 2] No such file or directory: '{}'".format(path))
        return FakeStat(p)

    def listdir(self, path):
        """Replacement for os.listdir; the '..mark' directory markers are hidden."""
        def first_segment(subpath):
            # Walk up to the first path component directly below `path`.
            dirname, basename = os.path.split(subpath)
            if not dirname or dirname == '/':
                return basename
            return first_segment(dirname)

        # listdir also supports passing a file descriptor to directory
        if isinstance(path, int):
            return []
        p = os.path.normpath(path)
        if not self.isdir(p):
            raise FileNotFoundError("[Errno 2] No such file or directory: '{}'".format(path))
        # Handle files
        suffixes = [f[len(p):] for f in self.files.keys() if f.startswith(p)]
        return [first_segment(suff) for suff in suffixes if not suff.endswith('..mark')]
class FakedTemporaryDirectory(object):
    """Context-manager replacement for tempfile.TemporaryDirectory under the fake fs."""

    def __enter__(self):
        base = tempfile.gettempdir()
        self.dirname = os.path.join(base, str(uuid.uuid4()))
        # The fake filesystem has no real directories, so drop a dummy file
        # inside the would-be directory to make it "exist".
        self.dummy_filepath = os.path.join(self.dirname, 'dummy_file')
        with open(self.dummy_filepath, 'w'):
            pass
        return self.dirname

    def __exit__(self, exc_type, exc_val, exc_tb):
        os.remove(self.dummy_filepath)
        self.dirname = None
        self.dummy_filepath = None
| |
"""A switch driver for Dell S3048-ON running Dell OS 9.
Uses the XML REST API for communicating with the switch.
"""
import logging
from lxml import etree
import re
import requests
from schema import Schema, Optional
from hil.model import db, Switch, SwitchSession
from hil.errors import BadArgumentError
from hil.model import BigIntegerType
from hil.network_allocator import get_network_allocator
from hil.ext.switches.common import should_save, check_native_networks, \
parse_vlans
from hil.config import core_schema, string_is_bool
# Module-level logger for this driver.
logger = logging.getLogger(__name__)
# XML tags used by the Dell OS9 REST API to wrap CLI commands.
CONFIG = 'config-commands'
SHOW = 'show-command'
EXEC = 'exec-command'
# Register this module's config schema: the optional 'save' flag controls
# whether the running config is persisted after each change (see should_save).
core_schema[__name__] = {
    Optional('save'): string_is_bool
}
class DellNOS9(Switch, SwitchSession):
    """Dell S3048-ON running Dell NOS9"""

    api_name = 'http://schema.massopencloud.org/haas/v0/switches/dellnos9'

    __mapper_args__ = {
        'polymorphic_identity': api_name,
    }

    id = db.Column(BigIntegerType,
                   db.ForeignKey('switch.id'), primary_key=True)
    hostname = db.Column(db.String, nullable=False)
    username = db.Column(db.String, nullable=False)
    password = db.Column(db.String, nullable=False)
    interface_type = db.Column(db.String, nullable=False)

    @staticmethod
    def validate(kwargs):
        """Validate the arguments used to register this switch."""
        Schema({
            'hostname': basestring,
            'username': basestring,
            'password': basestring,
            'interface_type': basestring,
        }).validate(kwargs)

    def session(self):
        """The driver is stateless; the switch object is its own session."""
        return self

    def ensure_legal_operation(self, nic, op_type, channel):
        check_native_networks(nic, op_type, channel)

    def get_capabilities(self):
        return []

    @staticmethod
    def validate_port_name(port):
        """Valid port names for this switch are of the form 1/0/1 or 1/2"""
        if not re.match(r'^\d+/\d+(/\d+)?$', port):
            raise BadArgumentError("Invalid port name. Valid port names for "
                                   "this switch are of the form 1/0/1 or 1/2")
        return

    def disconnect(self):
        """Since the switch is not connection oriented, we don't need to
        establish a session or disconnect from it."""

    def modify_port(self, port, channel, new_network):
        """Attach/detach <new_network> on <channel> of <port>.

        A None network detaches; 'vlan/native' manipulates the untagged vlan,
        anything else a tagged vlan on the trunk.
        """
        (port,) = filter(lambda p: p.label == port, self.ports)
        interface = port.label

        if channel == 'vlan/native':
            if new_network is None:
                self._remove_native_vlan(interface)
                self._port_shutdown(interface)
            else:
                self._set_native_vlan(interface, new_network)
        else:
            vlan_id = channel.replace('vlan/', '')
            legal = get_network_allocator(). \
                is_legal_channel_for(channel, vlan_id)
            assert legal, "HIL passed an invalid channel to the switch!"

            if new_network is None:
                self._remove_vlan_from_trunk(interface, vlan_id)
            else:
                assert new_network == vlan_id
                self._add_vlan_to_trunk(interface, vlan_id)

        if should_save(self):
            self.save_running_config()

    def revert_port(self, port):
        """Detach every vlan (tagged and native) from <port> and shut it down."""
        self._remove_all_vlans_from_trunk(port)
        if self._get_native_vlan(port) is not None:
            self._remove_native_vlan(port)
        self._port_shutdown(port)

        if should_save(self):
            self.save_running_config()

    def get_port_networks(self, ports):
        """Map each port object to its list of (channel, vlan) pairs."""
        response = {}
        for port in ports:
            response[port] = self._get_vlans(port.label)
            native = self._get_native_vlan(port.label)
            if native is not None:
                response[port].append(native)
        return response

    def _get_vlans(self, interface):
        """ Return the vlans of a trunk port.

        Does not include the native vlan. Use _get_native_vlan.

        Args:
            interface: interface to return the vlans of

        Returns: List containing the vlans of the form:
        [('vlan/vlan1', vlan1), ('vlan/vlan2', vlan2)]
        """
        # It uses the REST API CLI which is slow but it is the only way
        # because the switch is VLAN centric. Doing a GET on interface won't
        # return the VLANs on it, we would have to do get on all vlans (if that
        # worked reliably in the first place) and then find our interface there
        # which is not feasible.
        if not self._is_port_on(interface):
            return []
        response = self._get_port_info(interface)
        # finds a comma separated list of integers and/or ranges starting with
        # T. Sample T12,14-18,23,28,80-90 or T20 or T20,22 or T20-22
        match = re.search(r'T(\d+(-\d+)?)(,\d+(-\d+)?)*', response)
        if match is None:
            return []
        vlan_list = parse_vlans(match.group().replace('T', ''))
        return [('vlan/%s' % x, x) for x in vlan_list]

    def _get_native_vlan(self, interface):
        """ Return the native vlan of an interface.

        Args:
            interface: interface to return the native vlan of

        Returns: Tuple of the form ('vlan/native', vlan) or None
        Similar to _get_vlans()
        """
        if not self._is_port_on(interface):
            return None
        response = self._get_port_info(interface)
        match = re.search(r'NativeVlanId:(\d+)\.', response)
        if match is not None:
            vlan = match.group(1)
        else:
            # No match means no native vlan is configured; fall through to the
            # implicit None return.
            logger.error('Unexpected: No native vlan found')
            return
        return ('vlan/native', vlan)

    def _get_port_info(self, interface):
        """Returns the output of a show interface command. This removes all
        spaces from the response before returning it which is then parsed by
        the caller.

        Sample Response:
        u"<outputxmlns='http://www.dell.com/ns/dell:0.1/root'>\n
        <command>show interfaces switchport GigabitEthernet1/3\r\n\r\n
        Codes: U-Untagged T-Tagged\r\n x-Dot1x untagged,X-Dot1xtagged\r\n
        G-GVRP tagged,M-Trunk\r\n i-Internal untagged, I-Internaltagged,
        v-VLTuntagged, V-VLTtagged\r\n\r\n Name:GigabitEthernet1/3\r\n 802.1Q
        Tagged:Hybrid\r\n Vlan membership:\r\n Q Vlans\r\n U 1512 \r\n T 1511
        1612-1614,1700\r\n\r\n Native Vlan Id: 1512.\r\n\r\n\r\n\r\n
        MOC-Dell-S3048-ON#</command>\n</output>\n"
        """
        command = 'interfaces switchport %s %s' % \
            (self.interface_type, interface)
        response = self._execute(SHOW, command)
        return response.text.replace(' ', '')

    def _add_vlan_to_trunk(self, interface, vlan):
        """ Add a vlan to a trunk port.

        If the port is not trunked, its mode will be set to trunk.

        Args:
            interface: interface to add the vlan to
            vlan: vlan to add
        """
        if not self._is_port_on(interface):
            self._port_on(interface)
        command = 'interface vlan ' + vlan + '\r\n tagged ' + \
            self.interface_type + ' ' + interface
        self._execute(CONFIG, command)

    def _remove_vlan_from_trunk(self, interface, vlan):
        """ Remove a vlan from a trunk port.

        Args:
            interface: interface to remove the vlan from
            vlan: vlan to remove
        """
        command = self._remove_vlan_command(interface, vlan)
        self._execute(CONFIG, command)

    def _remove_all_vlans_from_trunk(self, interface):
        """ Remove all vlan from a trunk port.

        Args:
            interface: interface to remove the vlan from
        """
        command = ''
        for vlan in self._get_vlans(interface):
            command += self._remove_vlan_command(interface, vlan[1]) + '\r\n '
        # execute command only if there are some vlans to remove, otherwise
        # the switch complains.
        # Bug fix: this previously used `command is not ''`, an identity
        # comparison that only worked by accident of CPython string interning.
        if command != '':
            self._execute(CONFIG, command)

    def _remove_vlan_command(self, interface, vlan):
        """Returns command to remove <vlan> from <interface>"""
        return 'interface vlan ' + vlan + '\r\n no tagged ' + \
            self.interface_type + ' ' + interface

    def _set_native_vlan(self, interface, vlan):
        """ Set the native vlan of an interface.

        Args:
            interface: interface to set the native vlan to
            vlan: vlan to set as the native vlan

        Method relies on the REST API CLI which is slow
        """
        if not self._is_port_on(interface):
            self._port_on(interface)
        command = 'interface vlan ' + vlan + '\r\n untagged ' + \
            self.interface_type + ' ' + interface
        self._execute(CONFIG, command)

    def _remove_native_vlan(self, interface):
        """ Remove the native vlan from an interface.

        Args:
            interface: interface to remove the native vlan from
        """
        try:
            # _get_native_vlan returns None when there is nothing to remove;
            # subscripting it then raises TypeError, handled below.
            vlan = self._get_native_vlan(interface)[1]
            command = 'interface vlan ' + vlan + '\r\n no untagged ' + \
                self.interface_type + ' ' + interface
            self._execute(CONFIG, command)
        except TypeError:
            logger.error('No native vlan to remove')

    def _port_shutdown(self, interface):
        """ Shuts down <interface>

        Turn off portmode hybrid, disable switchport, and then shut down the
        port. All non-default vlans must be removed before calling this.
        """
        url = self._construct_url(interface=interface)
        interface = self._convert_interface_type(self.interface_type) + \
            interface.replace('/', '-')
        payload = '<interface><name>%s</name><portmode><hybrid>false' \
                  '</hybrid></portmode><shutdown>true</shutdown>' \
                  '</interface>' % interface
        self._make_request('PUT', url, data=payload)

    def _port_on(self, interface):
        """ Turns on <interface>

        Turn on port and enable hybrid portmode and switchport.
        """
        url = self._construct_url(interface=interface)
        interface = self._convert_interface_type(self.interface_type) + \
            interface.replace('/', '-')
        payload = '<interface><name>%s</name><portmode><hybrid>true' \
                  '</hybrid></portmode><switchport></switchport>' \
                  '<shutdown>false</shutdown></interface>' % interface
        self._make_request('PUT', url, data=payload)

    def _is_port_on(self, port):
        """ Returns a boolean that tells the status of a switchport"""
        # the url here requires a suffix to GET the shutdown tag in response.
        # NOTE(review): the raw string yields a literal backslash before the
        # '?' — presumably required by this API's URL handling; confirm.
        url = self._construct_url(interface=port) + r'\?with-defaults'
        response = self._make_request('GET', url)
        root = etree.fromstring(response.text)
        shutdown = root.find(self._construct_tag('shutdown')).text
        assert shutdown in ('false', 'true'), "unexpected state of switchport"
        return shutdown == 'false'

    def save_running_config(self):
        """Persist the running config to startup config ('write')."""
        command = 'write'
        self._execute(EXEC, command)

    def get_config(self, config_type):
        """Return the requested config ('running' or 'startup'), normalized
        for comparison."""
        command = config_type + '-config'
        config = self._execute(SHOW, command).text
        # The config files always have some lines in the beginning that we
        # need to remove otherwise the comparison would fail. Here's a sample:
        # Current Configuration ...
        # ! Version 9.11(0.0P6)
        # ! Last configuration change at Fri Nov  3 23:51:01 2017 by smartuser
        # ! Startup-config last updated at Sat Nov  4 02:04:57 2017 by admin
        # !
        # boot system stack-unit 1 primary system://A
        # boot system stack-unit 1 secondary system://B
        # !
        # hostname MOC-Dell-S3048-ON
        # !
        # protocol lldp
        # !
        # redundancy auto-synchronize full
        # !
        # username xxxxx password 7 XXXXXXXx privilege 15
        # !
        # stack-unit 1 provision S3048-ON
        lines_to_remove = 0
        lines = config.splitlines()
        for line in lines:
            if 'username' in line:
                break
            lines_to_remove += 1
        config = '\n'.join(lines[lines_to_remove:])
        # there were some extra spaces in one of the config file types that
        # would cause the tests to fail.
        return config.replace(" ", "")

    # HELPER METHODS *********************************************

    def _execute(self, command_type, command):
        """This method gets the url & the payload and executes <command>"""
        url = self._construct_url()
        payload = self._make_payload(command_type, command)
        return self._make_request('POST', url, data=payload)

    def _construct_url(self, interface=None):
        """ Construct the API url for a specific interface.

        Args:
            interface: interface to construct the url for

        Returns: string with the url for a specific interface and operation
        If interface is None, then it returns the URL for REST API CLI.
        """
        if interface is None:
            return '%s/api/running/dell/_operations/cli' % self.hostname
        try:
            self.validate_port_name(interface)
            # if `interface` refers to port name
            # the urls have dashes instead of slashes in interface names
            interface = interface.replace('/', '-')
            interface_type = self._convert_interface_type(self.interface_type)
        except BadArgumentError:
            # interface refers to `vlan`
            interface_type = 'vlan-'

        return ''.join([self.hostname, '/api/running/dell/interfaces/'
                        'interface/', interface_type, interface])

    @staticmethod
    def _convert_interface_type(interface_type):
        """ Convert the interface type from switch CLI form to what the API
        server understands.

        Args:
            interface: the interface in the CLI-form

        Returns: string interface
        """
        iftypes = {'GigabitEthernet': 'gige-',
                   'TenGigabitEthernet': 'tengig-',
                   'TwentyfiveGigabitEthernet': 'twentyfivegig-',
                   'fortyGigE': 'fortygig-',
                   'peGigabitEthernet': 'pegig-',
                   'FiftyGigabitEthernet': 'fiftygig-',
                   'HundredGigabitEthernet': 'hundredgig-'}
        return iftypes[interface_type]

    @property
    def _auth(self):
        """(username, password) tuple for HTTP basic auth."""
        return self.username, self.password

    @staticmethod
    def _make_payload(command_type, command):
        """Makes payload for passing CLI commands using the REST API"""
        return '<input><%s>%s</%s></input>' % (command_type, command,
                                               command_type)

    @staticmethod
    def _construct_tag(name):
        """ Construct the xml tag by prepending the dell tag prefix. """
        return '{http://www.dell.com/ns/dell:0.1/root}%s' % name

    def _make_request(self, method, url, data=None):
        """Issue an HTTP request to the switch; 4xx/5xx responses are logged
        but still returned to the caller."""
        r = requests.request(method, url, data=data, auth=self._auth)
        if r.status_code >= 400:
            logger.error('Bad Request to switch. Response: %s', r.text)
        return r
| |
"""
Blizzard Mipmap Format (.blp)
Jerome Leclanche <jerome@leclan.ch>
The contents of this file are hereby released in the public domain (CC0)
Full text of the CC0 license:
https://creativecommons.org/publicdomain/zero/1.0/
BLP1 files, used mostly in Warcraft III, are not fully supported.
All types of BLP2 files used in World of Warcraft are supported.
The BLP file structure consists of a header, up to 16 mipmaps of the
texture
Texture sizes must be powers of two, though the two dimensions do
not have to be equal; 512x256 is valid, but 512x200 is not.
The first mipmap (mipmap #0) is the full size image; each subsequent
mipmap halves both dimensions. The final mipmap should be 1x1.
BLP files come in many different flavours:
* JPEG-compressed (type == 0) - only supported for BLP1.
* RAW images (type == 1, encoding == 1). Each mipmap is stored as an
array of 8-bit values, one per pixel, left to right, top to bottom.
Each value is an index to the palette.
* DXT-compressed (type == 1, encoding == 2):
- DXT1 compression is used if alpha_encoding == 0.
- An additional alpha bit is used if alpha_depth == 1.
- DXT3 compression is used if alpha_encoding == 1.
- DXT5 compression is used if alpha_encoding == 7.
"""
import struct
from io import BytesIO
from . import Image, ImageFile
BLP_FORMAT_JPEG = 0
BLP_ENCODING_UNCOMPRESSED = 1
BLP_ENCODING_DXT = 2
BLP_ENCODING_UNCOMPRESSED_RAW_BGRA = 3
BLP_ALPHA_ENCODING_DXT1 = 0
BLP_ALPHA_ENCODING_DXT3 = 1
BLP_ALPHA_ENCODING_DXT5 = 7
def unpack_565(i):
    """Expand a 16-bit RGB565 value into an (r, g, b) tuple of 8-bit components."""
    red = (i >> 11) & 0x1f
    green = (i >> 5) & 0x3f
    blue = i & 0x1f
    # Shift each field up to an 8-bit range (low bits stay zero).
    return (red << 3, green << 2, blue << 3)
def decode_dxt1(data, alpha=False):
    """
    input: one "row" of data (i.e. will produce 4*width pixels)
    """
    def _expand_565(value):
        # Inlined RGB565 expansion (same math as module-level unpack_565).
        return (
            ((value >> 11) & 0x1f) << 3,
            ((value >> 5) & 0x3f) << 2,
            (value & 0x1f) << 3,
        )

    rows = (bytearray(), bytearray(), bytearray(), bytearray())
    # Each DXT1 block is 8 bytes: two RGB565 endpoints + 32 bits of 2-bit codes.
    for start in range(0, len(data) // 8 * 8, 8):
        c0, c1, lookup = struct.unpack_from("<HHI", data, start)
        r0, g0, b0 = _expand_565(c0)
        r1, g1, b1 = _expand_565(c1)

        # A block encodes a 4x4 tile; append pixels to the 4 row accumulators.
        for y in range(4):
            for x in range(4):
                sel = lookup & 3
                lookup >>= 2
                a = 0xFF
                if sel == 0:
                    r, g, b = r0, g0, b0
                elif sel == 1:
                    r, g, b = r1, g1, b1
                elif sel == 2:
                    if c0 > c1:
                        r = (2 * r0 + r1) // 3
                        g = (2 * g0 + g1) // 3
                        b = (2 * b0 + b1) // 3
                    else:
                        r = (r0 + r1) // 2
                        g = (g0 + g1) // 2
                        b = (b0 + b1) // 2
                else:  # sel == 3
                    if c0 > c1:
                        r = (2 * r1 + r0) // 3
                        g = (2 * g1 + g0) // 3
                        b = (2 * b1 + b0) // 3
                    else:
                        # 1-bit-alpha mode: fully transparent black.
                        r, g, b, a = 0, 0, 0, 0
                rows[y].extend([r, g, b, a] if alpha else [r, g, b])
    return rows
def decode_dxt3(data):
    """
    input: one "row" of data (i.e. will produce 4*width pixels)
    """
    def _expand_565(value):
        # Inlined RGB565 expansion (same math as module-level unpack_565).
        return (
            ((value >> 11) & 0x1f) << 3,
            ((value >> 5) & 0x3f) << 2,
            (value & 0x1f) << 3,
        )

    rows = (bytearray(), bytearray(), bytearray(), bytearray())
    # Each DXT3 block is 16 bytes: 8 bytes of 4-bit alphas, two RGB565
    # endpoints and 32 bits of 2-bit color codes.
    for start in range(0, len(data) // 16 * 16, 16):
        chunk = data[start:start + 16]
        alpha_bytes = struct.unpack_from("<8B", chunk)
        c0, c1 = struct.unpack_from("<HH", chunk, 8)
        code, = struct.unpack_from("<I", chunk, 12)
        r0, g0, b0 = _expand_565(c0)
        r1, g1, b1 = _expand_565(c1)
        for y in range(4):
            for x in range(4):
                pix = 4 * y + x
                packed = alpha_bytes[pix // 2]
                # Odd pixels take the high nibble, even pixels the low one.
                a = (packed >> 4) if pix % 2 else (packed & 0xf)
                a *= 17  # scale 0..15 up to 0..255
                sel = (code >> 2 * pix) & 0x03
                if sel == 0:
                    r, g, b = r0, g0, b0
                elif sel == 1:
                    r, g, b = r1, g1, b1
                elif sel == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                else:  # sel == 3
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3
                rows[y].extend([r, g, b, a])
    return rows
def decode_dxt5(data):
    """
    input: one "row" of data (i.e. will produce 4 * width pixels)
    """
    def _expand_565(value):
        # Inlined RGB565 expansion (same math as module-level unpack_565).
        return (
            ((value >> 11) & 0x1f) << 3,
            ((value >> 5) & 0x3f) << 2,
            (value & 0x1f) << 3,
        )

    rows = (bytearray(), bytearray(), bytearray(), bytearray())
    # Each DXT5 block is 16 bytes: two alpha endpoints, 48 bits of 3-bit
    # alpha codes, two RGB565 endpoints, 32 bits of 2-bit color codes.
    for start in range(0, len(data) // 16 * 16, 16):
        chunk = data[start:start + 16]
        a0, a1 = struct.unpack_from("<BB", chunk)
        abits = struct.unpack_from("<6B", chunk, 2)
        # Alpha codes are split into a low 16-bit word and a high 32-bit word.
        alpha_hi = abits[2] | (abits[3] << 8) | (abits[4] << 16) | (abits[5] << 24)
        alpha_lo = abits[0] | (abits[1] << 8)
        c0, c1 = struct.unpack_from("<HH", chunk, 8)
        code, = struct.unpack_from("<I", chunk, 12)
        r0, g0, b0 = _expand_565(c0)
        r1, g1, b1 = _expand_565(c1)
        for y in range(4):
            for x in range(4):
                pix = 4 * y + x
                bit_pos = 3 * pix
                if bit_pos <= 12:
                    acode = (alpha_lo >> bit_pos) & 0x07
                elif bit_pos == 15:
                    # This code straddles the 16-bit word boundary.
                    acode = (alpha_lo >> 15) | ((alpha_hi << 1) & 0x06)
                else:  # 18 <= bit_pos <= 45
                    acode = (alpha_hi >> (bit_pos - 16)) & 0x07

                if acode == 0:
                    a = a0
                elif acode == 1:
                    a = a1
                elif a0 > a1:
                    # 8-alpha mode: six interpolated values.
                    a = ((8 - acode) * a0 + (acode - 1) * a1) // 7
                elif acode == 6:
                    a = 0
                elif acode == 7:
                    a = 255
                else:
                    # 6-alpha mode: four interpolated values.
                    a = ((6 - acode) * a0 + (acode - 1) * a1) // 5

                sel = (code >> 2 * pix) & 0x03
                if sel == 0:
                    r, g, b = r0, g0, b0
                elif sel == 1:
                    r, g, b = r1, g1, b1
                elif sel == 2:
                    r = (2 * r0 + r1) // 3
                    g = (2 * g0 + g1) // 3
                    b = (2 * b0 + b1) // 3
                else:  # sel == 3
                    r = (2 * r1 + r0) // 3
                    g = (2 * g1 + g0) // 3
                    b = (2 * b1 + b0) // 3
                rows[y].extend([r, g, b, a])
    return rows
# Raised for unsupported or malformed BLP compression/encoding variants.
class BLPFormatError(NotImplementedError):
    pass
class BlpImageFile(ImageFile.ImageFile):
    """
    Blizzard Mipmap Format
    """
    format = "BLP"
    format_description = "Blizzard Mipmap Format"

    def _open(self):
        # The 4-byte magic distinguishes BLP1 (Warcraft III) from BLP2 (WoW).
        self.magic = self.fp.read(4)
        self._read_blp_header()

        if self.magic == b"BLP1":
            decoder = "BLP1"
            self.mode = "RGB"
        elif self.magic == b"BLP2":
            decoder = "BLP2"
            # Alpha channel only when the header advertises an alpha depth.
            self.mode = "RGBA" if self._blp_alpha_depth else "RGB"
        else:
            raise BLPFormatError("Bad BLP magic %r" % (self.magic))

        # Single tile covering the whole image, handled by the custom decoder.
        self.tile = [
            (decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))
        ]

    def _read_blp_header(self):
        # Header: compression (int32); encoding, alpha depth, alpha encoding
        # and mip count (one int8 each); width and height (uint32 each).
        self._blp_compression, = struct.unpack("<i", self.fp.read(4))
        self._blp_encoding, = struct.unpack("<b", self.fp.read(1))
        self._blp_alpha_depth, = struct.unpack("<b", self.fp.read(1))
        self._blp_alpha_encoding, = struct.unpack("<b", self.fp.read(1))
        self._blp_mips, = struct.unpack("<b", self.fp.read(1))
        self._size = struct.unpack("<II", self.fp.read(8))
        if self.magic == b"BLP1":
            # Only present for BLP1
            self._blp_encoding, = struct.unpack("<i", self.fp.read(4))
            self._blp_subtype, = struct.unpack("<i", self.fp.read(4))
        # Offsets and byte lengths of up to 16 mipmap levels.
        self._blp_offsets = struct.unpack("<16I", self.fp.read(16 * 4))
        self._blp_lengths = struct.unpack("<16I", self.fp.read(16 * 4))
class _BLPBaseDecoder(ImageFile.PyDecoder):
    # Shared base for the BLP1/BLP2 decoders: re-parses the header straight
    # from the file object, then defers pixel decoding to the subclass _load().
    _pulls_fd = True

    def decode(self, buffer):
        # Decodes the whole image in one shot; (0, 0) tells Pillow we're done.
        try:
            self.fd.seek(0)
            self.magic = self.fd.read(4)
            self._read_blp_header()
            self._load()  # implemented by subclasses
        except struct.error:
            raise IOError("Truncated Blp file")
        return 0, 0

    def _read_palette(self):
        # Read up to 256 BGRA palette entries; stops early on a short read.
        ret = []
        for i in range(256):
            try:
                b, g, r, a = struct.unpack("<4B", self.fd.read(4))
            except struct.error:
                break
            ret.append((b, g, r, a))
        return ret

    def _read_blp_header(self):
        # Header: compression (int32); encoding, alpha depth, alpha encoding
        # and mip count (one int8 each); width and height (uint32 each).
        self._blp_compression, = struct.unpack("<i", self.fd.read(4))
        self._blp_encoding, = struct.unpack("<b", self.fd.read(1))
        self._blp_alpha_depth, = struct.unpack("<b", self.fd.read(1))
        self._blp_alpha_encoding, = struct.unpack("<b", self.fd.read(1))
        self._blp_mips, = struct.unpack("<b", self.fd.read(1))
        self.size = struct.unpack("<II", self.fd.read(8))
        if self.magic == b"BLP1":
            # Only present for BLP1
            self._blp_encoding, = struct.unpack("<i", self.fd.read(4))
            self._blp_subtype, = struct.unpack("<i", self.fd.read(4))
        # Offsets and byte lengths of up to 16 mipmap levels.
        self._blp_offsets = struct.unpack("<16I", self.fd.read(16 * 4))
        self._blp_lengths = struct.unpack("<16I", self.fd.read(16 * 4))
class BLP1Decoder(_BLPBaseDecoder):
    def _load(self):
        """Decode a BLP1 image: JPEG-wrapped or palettized raw pixels."""
        if self._blp_compression == BLP_FORMAT_JPEG:
            self._decode_jpeg_stream()
        elif self._blp_compression == 1:
            if self._blp_encoding in (4, 5):
                # Palettized: one byte per pixel indexing the BGRA palette.
                data = bytearray()
                palette = self._read_palette()
                _data = BytesIO(self.fd.read(self._blp_lengths[0]))
                while True:
                    try:
                        offset, = struct.unpack("<B", _data.read(1))
                    except struct.error:
                        break
                    b, g, r, a = palette[offset]
                    data.extend([r, g, b])
                self.set_as_raw(bytes(data))
            else:
                raise BLPFormatError(
                    "Unsupported BLP encoding %r" % (self._blp_encoding)
                )
        else:
            # Bug fix: this branch previously interpolated self._blp_encoding,
            # but the value being rejected here is the compression field.
            raise BLPFormatError(
                "Unsupported BLP compression %r" % (self._blp_compression)
            )

    def _decode_jpeg_stream(self):
        """Reassemble the shared JPEG header with mipmap #0's payload and
        hand the result to Pillow's JPEG plugin."""
        from PIL.JpegImagePlugin import JpegImageFile

        jpeg_header_size, = struct.unpack("<I", self.fd.read(4))
        jpeg_header = self.fd.read(jpeg_header_size)
        # Skip any padding between the header and the first mipmap's data.
        self.fd.read(self._blp_offsets[0] - self.fd.tell())
        data = self.fd.read(self._blp_lengths[0])
        data = jpeg_header + data
        data = BytesIO(data)
        image = JpegImageFile(data)
        # Delegate the actual decode to the JPEG plugin's tile machinery.
        self.tile = image.tile  # :/
        self.fd = image.fp
        self.mode = image.mode
class BLP2Decoder(_BLPBaseDecoder):
    def _load(self):
        # Only mipmap #0 (the full-size image) is decoded.
        palette = self._read_palette()

        data = bytearray()
        self.fd.seek(self._blp_offsets[0])
        if self._blp_compression == 1:
            # Uncompressed or DirectX compression
            if self._blp_encoding == BLP_ENCODING_UNCOMPRESSED:
                # Palettized: one byte per pixel indexing the BGRA palette.
                _data = BytesIO(self.fd.read(self._blp_lengths[0]))
                while True:
                    try:
                        offset, = struct.unpack("<B", _data.read(1))
                    except struct.error:
                        break
                    b, g, r, a = palette[offset]
                    data.extend((r, g, b))
            elif self._blp_encoding == BLP_ENCODING_DXT:
                if self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT1:
                    # DXT1: 8 bytes per 4x4 block.
                    linesize = (self.size[0] + 3) // 4 * 8
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt1(
                            self.fd.read(linesize),
                            alpha=bool(self._blp_alpha_depth)
                        ):
                            data += d
                elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT3:
                    # DXT3: 16 bytes per 4x4 block (explicit 4-bit alpha).
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt3(self.fd.read(linesize)):
                            data += d
                elif self._blp_alpha_encoding == BLP_ALPHA_ENCODING_DXT5:
                    # DXT5: 16 bytes per 4x4 block (interpolated alpha).
                    linesize = (self.size[0] + 3) // 4 * 16
                    for yb in range((self.size[1] + 3) // 4):
                        for d in decode_dxt5(self.fd.read(linesize)):
                            data += d
                else:
                    raise BLPFormatError("Unsupported alpha encoding %r" % (
                        self._blp_alpha_encoding
                    ))
            else:
                raise BLPFormatError(
                    "Unknown BLP encoding %r" % (self._blp_encoding)
                )
        else:
            raise BLPFormatError(
                "Unknown BLP compression %r" % (self._blp_compression)
            )

        self.set_as_raw(bytes(data))
# Register the BLP format with Pillow: magic-based sniffing, the .blp
# extension, and the two custom decoders defined above.
Image.register_open(
    BlpImageFile.format, BlpImageFile, lambda p: p[:4] in (b"BLP1", b"BLP2")
)
Image.register_extension(BlpImageFile.format, ".blp")
Image.register_decoder("BLP1", BLP1Decoder)
Image.register_decoder("BLP2", BLP2Decoder)
| |
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import hou
import IECore
import IECoreHoudini
import unittest
class TestAttributeRemap( IECoreHoudini.TestCase ):
    """Tests for remapping Houdini attributes onto IECore primitives via an
    'attribute' SOP feeding an ieOpHolder.

    Fixes over the previous revision:
      - assertEqual was called with a third positional argument (0.75 / 0.5),
        which unittest treats as the failure *message*, so those values were
        never actually verified; the checks are now explicit.
      - a duplicated assertion and locals shadowing the `object` builtin
        were removed; deprecated assert_/failUnless aliases replaced.
    """

    def testCreateObjects(self):
        """Build torus -> texture -> point -> attribute -> ieOpHolder and
        return (op_holder_node, attribute_node) for the other tests."""
        obj = hou.node("/obj")
        geo = obj.createNode("geo", run_init_scripts=False)
        torus = geo.createNode( "torus" )
        torus.parm( "rows" ).set( 10 )
        torus.parm( "cols" ).set( 10 )
        tex = torus.createOutputNode( "texture" )
        point = tex.createOutputNode( "point" )
        point.parm("doclr").set(1)
        # Random red, constant green/blue so later tests can check exact
        # per-channel values (0.75 and 0.5).
        point.parm("diffr").setExpression( "rand($PT)" )
        point.parm("diffg").setExpression( "0.75" )
        point.parm("diffb").setExpression( "0.5" )
        attribute = point.createOutputNode( "attribute" )
        op = attribute.createOutputNode( "ieOpHolder" )
        cl = IECore.ClassLoader.defaultOpLoader().load("objectDebug", 1)()
        fn = IECoreHoudini.FnOpHolder( op )
        fn.setParameterised( cl )
        op.parm("parm_quiet").set(True)
        op.cook()
        res = cl.resultParameter().getValue()
        self.assertEqual( res.keys(), ["Cs","P","s","t"] )
        self.assertTrue( geo )
        self.assertTrue( torus )
        self.assertTrue( point )
        self.assertTrue( attribute )
        self.assertTrue( op )
        self.assertTrue( fn )
        return (op, attribute)

    def testNormalBehaviour(self):
        """With no remapping configured, raw Houdini attributes come through."""
        (op, attr) = self.testCreateObjects()
        op.cook()
        fn = IECoreHoudini.FnOpHolder( op )
        self.assertTrue( fn )
        cl = fn.getParameterised()
        result = cl.resultParameter().getValue()
        self.assertTrue( result )
        self.assertTrue( 'P' in result )
        self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
        self.assertTrue( 'Cs' in result )
        self.assertEqual( result['Cs'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result['Cs'].data.typeId(), IECore.TypeId.Color3fVectorData )

    def testBasicRemapping(self):
        """The 'ridefault' toggle applies the standard RI remapping."""
        (op, attr) = self.testCreateObjects()
        attr.parm("ridefault").set(True)
        op.cook()
        fn = IECoreHoudini.FnOpHolder( op )
        self.assertTrue( fn )
        cl = fn.getParameterised()
        result = cl.resultParameter().getValue()
        self.assertEqual( result.keys(), ['Cs', 'P', 'rixlate', 's', 't'] )
        self.assertTrue( result )
        self.assertTrue( 'P' in result )
        self.assertTrue( result.arePrimitiveVariablesValid() )
        self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
        self.assertTrue( 'Cs' in result )
        self.assertEqual( result['Cs'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex ) # the default ri conversion sets this as vertex
        self.assertEqual( result['Cs'].data.typeId(), IECore.TypeId.Color3fVectorData )
        self.assertTrue( 'rixlate' in result )
        self.assertEqual( result['rixlate'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( result['rixlate'].data.typeId(), IECore.TypeId.StringData )
        self.assertTrue( 's' in result )
        self.assertEqual( result['s'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result['s'].data.typeId(), IECore.TypeId.FloatVectorData )
        self.assertTrue( 't' in result )
        self.assertEqual( result['t'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( result['t'].data.typeId(), IECore.TypeId.FloatVectorData )

    def testMappingManual(self):
        """A single user-specified mapping (Cd -> col as v_color)."""
        (op, attr) = self.testCreateObjects()
        attr.parm("hname0").set("Cd")
        attr.parm("riname0").set("col")
        attr.parm("ritype0").set("v_color")
        op.cook()
        fn = IECoreHoudini.FnOpHolder(op)
        geo = fn.getParameterised().resultParameter().getValue()
        self.assertEqual( geo.typeId(), IECore.TypeId.MeshPrimitive )
        self.assertEqual( geo.keys(), ["P", "col", "rixlate", "s", "t"] )
        self.assertEqual( geo['col'].interpolation, IECore.PrimitiveVariable.Interpolation.Varying )
        self.assertEqual( geo['col'].data.typeId(), IECore.TypeId.Color3fVectorData )

    def testMappingOffsets(self):
        """Split one vector attribute (Cd) into three floats via offsets."""
        (op, attr) = self.testCreateObjects()
        attr.parm("hname0").set("Cd")
        attr.parm("riname0").set("col_r")
        attr.parm("ritype0").set("v_float")
        attr.parm("rioff0").set(0)
        attr.parm("hname1").set("Cd")
        attr.parm("riname1").set("col_g")
        attr.parm("ritype1").set("v_float")
        attr.parm("rioff1").set(1)
        attr.parm("hname2").set("Cd")
        attr.parm("riname2").set("col_b")
        attr.parm("ritype2").set("vtx_float") # try a vertex interpolation
        attr.parm("rioff2").set(2)
        op.cook()
        fn = IECoreHoudini.FnOpHolder(op)
        geo = fn.getParameterised().resultParameter().getValue()
        self.assertEqual( geo.typeId(), IECore.TypeId.MeshPrimitive )
        self.assertTrue( "P" in geo.keys() )
        self.assertTrue( "col_r" in geo.keys() )
        self.assertTrue( "col_g" in geo.keys() )
        self.assertTrue( "col_b" in geo.keys() )
        self.assertTrue( "s" in geo.keys() )
        self.assertTrue( "t" in geo.keys() )
        # Red channel is rand($PT), so values differ per point.
        self.assertEqual( geo['col_r'].interpolation, IECore.PrimitiveVariable.Interpolation.Varying )
        self.assertEqual( geo['col_r'].data.typeId(), IECore.TypeId.FloatVectorData )
        self.assertNotEqual( geo['col_r'].data[0], geo['col_r'].data[1] )
        # Green channel is a constant 0.75 expression. Previously this used
        # assertEqual(a, b, 0.75) where 0.75 was silently treated as the
        # failure message; check the value explicitly now.
        self.assertEqual( geo['col_g'].interpolation, IECore.PrimitiveVariable.Interpolation.Varying )
        self.assertEqual( geo['col_g'].data.typeId(), IECore.TypeId.FloatVectorData )
        self.assertEqual( geo['col_g'].data[0], geo['col_g'].data[1] )
        self.assertAlmostEqual( geo['col_g'].data[0], 0.75 )
        # Blue channel is a constant 0.5 expression (same fix as above).
        self.assertEqual( geo['col_b'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( geo['col_b'].data.typeId(), IECore.TypeId.FloatVectorData )
        self.assertEqual( geo['col_b'].data[0], geo['col_b'].data[1] )
        self.assertAlmostEqual( geo['col_b'].data[0], 0.5 )

    def testDuplicateNaming(self):
        """Point and primitive attributes with the same Houdini name must map
        to two distinct primitive variables."""
        (op, attr) = self.testCreateObjects()
        top = attr.inputs()[0]
        attr1 = top.createOutputNode( "attribcreate", exact_type_name=True )
        attr1.parm("name").set("test")
        attr2 = attr1.createOutputNode( "attribcreate", exact_type_name=True )
        attr2.parm("class").set(1) # primitive
        attr2.parm("name").set("test")
        attr.setInput(0,attr2)
        attr.parm("hname0").set( "test" )
        attr.parm("riname0").set( "point_test" ) # a vertex point float
        attr.parm("ritype0").set("vtx_float")
        attr.parm("hname1").set( "test" )
        attr.parm("riname1").set( "prim_test" ) # a uniform primitive float
        attr.parm("ritype1").set("u_float")
        op.cook()
        fn = IECoreHoudini.FnOpHolder(op)
        geo = fn.getParameterised().resultParameter().getValue()
        self.assertEqual( geo.keys(), ['Cs', 'P', 'point_test', 'prim_test', 'rixlate', 's', 't', 'varmap'] )
        self.assertEqual( geo['point_test'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( len(geo['point_test'].data), 100 )
        self.assertEqual( geo['prim_test'].interpolation, IECore.PrimitiveVariable.Interpolation.Uniform )
        self.assertEqual( len(geo['prim_test'].data), 100 )

    def testPrimAttributes(self):
        """Primitive attributes can be mapped as uniform or constant floats."""
        (op, attr) = self.testCreateObjects()
        top = attr.inputs()[0]
        attr1 = top.createOutputNode( "attribcreate", exact_type_name=True )
        attr1.parm("class").set(1) # primitive
        attr1.parm("name").set("test")
        attr.setInput(0,attr1)
        attr.parm("hname0").set( "test" )
        attr.parm("riname0").set( "test" ) # a uniform prim float
        attr.parm("ritype0").set("u_float")
        op.cook()
        fn = IECoreHoudini.FnOpHolder(op)
        geo = fn.getParameterised().resultParameter().getValue()
        self.assertEqual( geo.keys(), ['Cs', 'P', 'rixlate', 's', 't', 'test', 'varmap'] )
        self.assertEqual( geo['test'].interpolation, IECore.PrimitiveVariable.Interpolation.Uniform )
        self.assertEqual( len(geo['test'].data), 100 )
        attr.parm("hname0").set( "test" )
        attr.parm("riname0").set( "test" ) # a constant prim float
        attr.parm("ritype0").set("c_float")
        op.cook()
        fn = IECoreHoudini.FnOpHolder(op)
        geo = fn.getParameterised().resultParameter().getValue()
        self.assertEqual( geo.keys(), ['Cs', 'P', 'rixlate', 's', 't', 'test', 'varmap'] )
        self.assertEqual( geo['test'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( len(geo['test'].data), 100 )
# Allow running this file directly as a standalone test suite.
if __name__ == "__main__":
	unittest.main()
| |
'''
Copyright (c) 2020 Yogesh Khatri
This file is part of mac_apt (macOS Artifact Parsing Tool).
Usage or distribution of this software/code is subject to the
terms of the MIT License.
mac_apt_mounted_sys_data.py
----------
This is a special version of the mac_apt.py script which is
specifically created for processing macOS 10.15 (Catalina) or
above images in MOUNTED mode. Here you will have two separate
volumes mounted for SYSTEM and DATA. Provide both to this
script.
For usage information, run:
python mac_apt_mounted_sys_data.py -h
NOTE: This currently works only on Python3.7 or higher.
'''
import argparse
import logging
import os
import plugins.helpers.macinfo as macinfo
import sys
import textwrap
import time
import traceback
from plugins.helpers.writer import *
from plugins.helpers.disk_report import *
from plugin import *
from version import __VERSION
__PROGRAMNAME = "macOS Artifact Parsing Tool - SYS DATA Mounted mode"
__EMAIL = "yogesh@swiftforensics.com"
def IsItemPresentInList(collection, item):
    '''Return True if item is present in collection, else False.

    Replaces the previous index()/ValueError dance with the idiomatic
    membership test; same result for the list inputs this script uses.
    '''
    return item in collection
def FindMacOsFiles(mac_info):
    '''Probe the mounted volumes for a macOS installation.

    Returns True (after populating system and user info on mac_info) when
    SystemVersion.plist is present; False otherwise. A missing kernel is
    only logged, since partial/corrupted images may lack it.
    '''
    if not mac_info.IsValidFilePath('/System/Library/CoreServices/SystemVersion.plist'):
        log.info ("Could not find OSX/macOS installation!")
        return False
    has_kernel = (mac_info.IsValidFilePath("/System/Library/Kernels/kernel")
                  or mac_info.IsValidFilePath( "/mach_kernel"))
    if has_kernel:
        log.info ("Found valid OSX/macOS kernel")
    else:
        # On partial/corrupted images, this may not be found
        log.info ("Could not find OSX/macOS kernel!")
    mac_info._GetSystemInfo()
    mac_info._GetUserInfo()
    return True
def Exit(message=''):
    '''Terminate the program, logging `message` first when a logger exists.

    When no logger is available (or the message is empty) the message is
    handed to sys.exit directly, which prints it to stderr.
    '''
    can_log = log and (len(message) > 0)
    if can_log:
        log.info(message)
        sys.exit()
    sys.exit(message)
def SetupExportLogger(output_params):
    '''Creates the writer for logging files exported'''
    # Everything extracted from the image lands under <output>/Export.
    output_params.export_path = os.path.join(output_params.output_path, "Export")
    if not os.path.exists(output_params.export_path):
        try:
            os.makedirs(output_params.export_path)
        except Exception as ex:
            log.error("Exception while creating Export folder: " + output_params.export_path + "\n Is the location Writeable?" +
                    "Is drive full? Perhaps the drive is disconnected? Exception Details: " + str(ex))
            Exit()
    # One async sqlite writer records a row of timestamps per exported file.
    # NOTE(review): `collections` is not imported explicitly at the top of
    # this file; it presumably arrives via a wildcard import — confirm.
    export_sqlite_path = SqliteWriter.CreateSqliteDb(os.path.join(output_params.export_path, "Exported_Files_Log.db"))
    writer = SqliteWriter(asynchronous=True)
    writer.OpenSqliteDb(export_sqlite_path)
    column_info = collections.OrderedDict([ ('SourcePath',DataType.TEXT), ('ExportPath',DataType.TEXT),
                                            ('InodeModifiedTime',DataType.DATE),('ModifiedTime',DataType.DATE),
                                            ('CreatedTime',DataType.DATE),('AccessedTime',DataType.DATE) ])
    writer.CreateTable(column_info, 'ExportedFileInfo')
    output_params.export_log_sqlite = writer
## Main program ##

# Discover all MACOS plugins and build the help text listing them.
plugins = []
log = None
plugin_count = ImportPlugins(plugins, 'MACOS')
if plugin_count == 0:
    Exit ("No plugins could be added ! Exiting..")
# 'ALL' and 'FAST' are pseudo plugin names handled specially below.
plugin_name_list = ['ALL', 'FAST']
plugins_info = f"The following {len(plugins)} plugins are available:"
for plugin in plugins:
    plugins_info += "\n    {:<20}{}".format(plugin.__Plugin_Name, textwrap.fill(plugin.__Plugin_Description, subsequent_indent=' '*24, initial_indent=' '*24, width=80)[24:])
    plugin_name_list.append(plugin.__Plugin_Name)
plugins_info += "\n    " + "-"*76 + "\n" +\
                 " "*4 + "FAST" + " "*16 + "Runs all plugins except IDEVICEBACKUPS, SPOTLIGHT, UNIFIEDLOGS\n" + \
                 " "*4 + "ALL" + " "*17 + "Runs all plugins"
# Command line interface: two positional volume paths plus output options.
arg_parser = argparse.ArgumentParser(description='mac_apt is a framework to process macOS forensic artifacts\n'\
                                     f'You are running {__PROGRAMNAME} version {__VERSION}\n\n'\
                                     'Note: The default output is now sqlite, no need to specify it now',
                                     epilog=plugins_info, formatter_class=argparse.RawTextHelpFormatter)
arg_parser.add_argument('input_sys_path', help='Path to root folder of mounted SYSTEM image/volume')
arg_parser.add_argument('input_data_path', help='Path to root folder of mounted DATA image/volume')
arg_parser.add_argument('-o', '--output_path', help='Path where output files will be created')
arg_parser.add_argument('-x', '--xlsx', action="store_true", help='Save output in Excel spreadsheet')
arg_parser.add_argument('-c', '--csv', action="store_true", help='Save output as CSV files')
arg_parser.add_argument('-t', '--tsv', action="store_true", help='Save output as TSV files (tab separated)')
#arg_parser.add_argument('-s', '--sqlite', action="store_true", help='Save output in an sqlite database')
arg_parser.add_argument('-l', '--log_level', help='Log levels: INFO, DEBUG, WARNING, ERROR, CRITICAL (Default is INFO)')#, choices=['INFO','DEBUG','WARNING','ERROR','CRITICAL'])
arg_parser.add_argument('plugin', nargs="+", help="Plugins to run (space separated). 'FAST' will run most plugins")
args = arg_parser.parse_args()
# Resolve the output folder (defaulting to the script's folder) and map the
# textual log level to the logging module's constants.
if args.output_path:
    if (os.name != 'nt'):
        if args.output_path.startswith('~/') or args.output_path == '~': # for linux/mac, translate ~ to user profile folder
            args.output_path = os.path.expanduser(args.output_path)
    print ("Output path was : {}".format(args.output_path))
    if not CheckOutputPath(args.output_path):
        Exit()
else:
    args.output_path = os.path.abspath('.') # output to same folder as script.
if args.log_level:
    args.log_level = args.log_level.upper()
    if not args.log_level in ['INFO','DEBUG','WARNING','ERROR','CRITICAL']: # TODO: change to just [info, debug, error]
        Exit("Invalid input type for log level. Valid values are INFO, DEBUG, WARNING, ERROR, CRITICAL")
    else:
        if args.log_level == "INFO": args.log_level = logging.INFO
        elif args.log_level == "DEBUG": args.log_level = logging.DEBUG
        elif args.log_level == "WARNING": args.log_level = logging.WARNING
        elif args.log_level == "ERROR": args.log_level = logging.ERROR
        elif args.log_level == "CRITICAL": args.log_level = logging.CRITICAL
else:
    args.log_level = logging.INFO
# Create the timestamped run log, then validate both volume paths up front.
log = CreateLogger(os.path.join(args.output_path, "Log." + str(time.strftime("%Y%m%d-%H%M%S")) + ".txt"), args.log_level, args.log_level) # Create logging infrastructure
log.setLevel(args.log_level)
log.info("Started {}, version {}".format(__PROGRAMNAME, __VERSION))
log.info("Dates and times are in UTC unless the specific artifact being parsed saves it as local time!")
log.debug(' '.join(sys.argv))
#LogLibraryVersions(log)
# Check inputs
if not os.path.isdir(args.input_sys_path):
    Exit('Exiting -> Invalid SYSTEM volume path entered - {}'.format(args.input_sys_path))
if not os.path.isdir(args.input_data_path):
    Exit('Exiting -> Invalid DATA volume path entered - {}'.format(args.input_data_path))
# Work out which plugins to run: 'ALL' runs everything, 'FAST' runs everything
# except the three slow plugins, otherwise the user's explicit list is checked.
plugins_to_run = [x.upper() for x in args.plugin] # convert all plugin names entered by user to uppercase
process_all = IsItemPresentInList(plugins_to_run, 'ALL')
if not process_all:
    if IsItemPresentInList(plugins_to_run, 'FAST'): # check for FAST
        plugins_to_run = plugin_name_list
        plugins_to_run.remove('ALL')
        plugins_to_run.remove('FAST')
        plugins_to_run.remove('IDEVICEBACKUPS')
        plugins_to_run.remove('SPOTLIGHT')
        plugins_to_run.remove('UNIFIEDLOGS')
    else:
        #Check for invalid plugin names or ones not Found
        if not CheckUserEnteredPluginNames(plugins_to_run, plugins):
            Exit("Exiting -> Invalid plugin name entered.")
# Check outputs, create output files
output_params = macinfo.OutputParams()
output_params.output_path = args.output_path
SetupExportLogger(output_params)
# Sqlite is the mandatory output; failure to create it aborts the run.
try:
    sqlite_path = os.path.join(output_params.output_path, "mac_apt.db")
    output_params.output_db_path = SqliteWriter.CreateSqliteDb(sqlite_path)
    output_params.write_sql = True
except Exception as ex:
    log.info('Sqlite db could not be created at : ' + sqlite_path)
    log.exception('Exception occurred when trying to create Sqlite db')
    Exit()
# XLSX is best-effort: on failure the run continues without it
# (write_xlsx is only set on success).
if args.xlsx:
    try:
        xlsx_path = os.path.join(output_params.output_path, "mac_apt.xlsx")
        output_params.xlsx_writer = ExcelWriter()
        output_params.xlsx_writer.CreateXlsxFile(xlsx_path)
        output_params.write_xlsx = True
    except Exception as ex:
        log.info('XLSX file could not be created at : ' + xlsx_path)
        log.exception('Exception occurred when trying to create XLSX file')
if args.csv:
    output_params.write_csv = True
if args.tsv:
    output_params.write_tsv = True
# At this point, all looks good, lets mount the image
found_macos = False
mac_info = None
time_processing_started = time.time()
try:
    log.info("Opened images ")
    mac_info = macinfo.MountedMacInfoSeperateSysData(args.input_sys_path, args.input_data_path, output_params)
    found_macos = FindMacOsFiles(mac_info)
except Exception as ex:
    log.exception("Failed to browse image. Error Details are: " + str(ex))
    Exit()
# Start processing plugins now!
if found_macos:
    for plugin in plugins:
        if process_all or IsItemPresentInList(plugins_to_run, plugin.__Plugin_Name):
            log.info("-"*50)
            log.info("Running plugin " + plugin.__Plugin_Name)
            try:
                # A crash in one plugin is logged and must not stop the others.
                plugin.Plugin_Start(mac_info)
            except Exception as ex:
                log.exception ("An exception occurred while running plugin - {}".format(plugin.__Plugin_Name))
else:
    log.warning (":( Could not find a partition having a macOS installation on it")
log.info("-"*50)
# Final cleanup
if args.xlsx:
    output_params.xlsx_writer.CommitAndCloseFile()
# NOTE(review): assumes mac_info was constructed and exposes is_apfs/apfs_db;
# the earlier Exit() on failure makes mac_info non-None here — confirm.
if mac_info.is_apfs and mac_info.apfs_db != None:
    mac_info.apfs_db.CloseDb()
time_processing_ended = time.time()
run_time = time_processing_ended - time_processing_started
log.info("Finished in time = {}".format(time.strftime('%H:%M:%S', time.gmtime(run_time))))
log.info("Review the Log file and report any ERRORs or EXCEPTIONS to the developers")
| |
# Make sure we always use the same backend for image comparison tests
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
import warnings
import pygtc
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
# Feature flags: scipy availability is reported by pygtc itself.
HAS_SCIPY = pygtc.haveScipy
# Major matplotlib version; pre-2.0 rendering differs enough to break
# image comparisons.
MPLVER = int(matplotlib.__version__.split('.')[0])
if MPLVER < 2:
    warnings.warn("Several tests are known to fail under matplotlib versions " +
                  "less than 2.0. The plots should still look good!",
                  UserWarning)
# Shorthand for the pytest-mpl image comparison decorator.
image_comp = pytest.mark.mpl_image_compare
# Set up some global variables for testing
def _make_random_chain(ndim=4, Npoints=10000, seed=0):
np.random.seed(seed)
means = np.random.rand(ndim)
cov = .5 - np.random.rand(ndim**2).reshape((ndim, ndim))
cov = np.triu(cov)
cov += cov.T - np.diag(cov.diagonal())
cov = np.dot(cov, cov)
samples = np.random.multivariate_normal(means, cov, Npoints)
return samples
# Create two sets of sample points with 4 parameters and 10000 points
SAMPLES_1 = 2*_make_random_chain(seed=1)
SAMPLES_2 = 1+_make_random_chain(seed=2)
# Shift the 4th parameter by a huge constant — presumably to exercise
# tick/offset formatting on the axes; confirm against the plot baselines.
SAMPLES_1[:, 3] += 1e8
SAMPLES_2[:, 3] += 1e8
# Specify kwargs for savefig. We change two things:
# 1: bbox tight ensures that the labels don't get cut off.
# 2: Set a dpi that won't suck on retina displays and will look fine on anything
# else too. This is only really an issue for raster graphics. Sane people will
# use a vector format, but testing is faster with raster.
SFKWARGS = {'bbox_inches': 'tight',
            'dpi': 300}
# If this one fails, something is really wrong with matplotlib
@image_comp(filename='img.png', savefig_kwargs=SFKWARGS)
def test_img():
    # Trivial figure: a single point on default axes.
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.plot(1, 1)
    return figure
# A test for (almost) every keyword argument
@image_comp(filename='bare.png', savefig_kwargs=SFKWARGS)
def test_GTC_bare():
    # Just the two chains, smoothing disabled.
    chains = [SAMPLES_1, SAMPLES_2]
    return pygtc.plotGTC(chains=chains, smoothingKernel=0)


@image_comp(filename='pandas.png', savefig_kwargs=SFKWARGS)
def test_GTC_pandas():
    # Parameter names should be picked up from the DataFrame columns.
    if not HAS_PANDAS:
        pytest.skip("Can't test pandas auto-name without pandas.")
    namesNoTex = ['param name', 'B_labmda', 'C', 'lambda']
    frames = [pd.DataFrame(SAMPLES_1, columns=namesNoTex),
              pd.DataFrame(SAMPLES_2, columns=namesNoTex)]
    return pygtc.plotGTC(chains=frames, smoothingKernel=0)
@image_comp(filename='paramNames_noTex.png', savefig_kwargs=SFKWARGS)
def test_GTC_paramNames_noTex():
    # Plain-text axis labels.
    labels = ['param name', 'B_labmda', 'C', 'lambda']
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         paramNames=labels, smoothingKernel=0)


@image_comp(filename='paramNames_withTex.png', savefig_kwargs=SFKWARGS)
def test_GTC_paramNames_withTex():
    # TeX axis labels.
    labels = ['param name', '$B_\\mathrm{\\lambda}$', '$Q^a$', '$\\lambda$']
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         paramNames=labels, smoothingKernel=0)


@image_comp(filename='chainLabels_noTex.png', savefig_kwargs=SFKWARGS)
def test_GTC_chainLabels_noTex():
    # Plain-text legend entries.
    legend = ['data1', 'data 2']
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         chainLabels=legend, smoothingKernel=0)


@image_comp(filename='chainLabels_withTex.png', savefig_kwargs=SFKWARGS)
def test_GTC_chainLabels_withTex():
    # TeX legend entries.
    legend = ['data1 $\\lambda$', 'data 2']
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         chainLabels=legend, smoothingKernel=0)
@image_comp(filename='truthLabels_noTex.png', savefig_kwargs=SFKWARGS)
def test_GTC_truthLabels_noTex():
    # Two truth lines with plain-text legend entries.
    marks = ((4, .5, None, .1),
             (None, None, .3, 1))
    legend = ('the truth', 'alternative truth')
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         truths=marks, truthLabels=legend,
                         smoothingKernel=0)


@image_comp(filename='truthLabels_withTex.png', savefig_kwargs=SFKWARGS)
def test_GTC_truthLabels_withTex():
    # Two truth lines with TeX legend entries.
    marks = ((4, .5, None, .1),
             (None, None, .3, 1))
    legend = ('the truth $f_0$', 'alternative truth $\\lambda$')
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         truths=marks, truthLabels=legend,
                         smoothingKernel=0)


# TODO: Add a test for truthColors
@image_comp(filename='truthLineStyles.png', savefig_kwargs=SFKWARGS)
def test_GTC_truthLineStyles():
    # Force both truth lines to be solid.
    styles = ['-', '-']
    marks = ((4, .5, None, .1),
             (None, None, .3, 1))
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         truths=marks, truthLineStyles=styles,
                         smoothingKernel=0)
@image_comp(filename='priors.png', tol=5e-3, savefig_kwargs=SFKWARGS)
def test_GTC_priors():
    # Gaussian priors on two of the four parameters (needs scipy).
    if not HAS_SCIPY:
        pytest.skip("Can't test priors without scipy installed.")
    priors = (None, (2, 1), (.5, 2), ())
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         priors=priors, smoothingKernel=0)


# TODO: Think up a good way to test plotName
@image_comp(filename='nContourLevels.png', savefig_kwargs=SFKWARGS)
def test_GTC_nContourLevels():
    # Three contour levels instead of the default.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         nContourLevels=3, smoothingKernel=0)


@image_comp(filename='sigmaContourLevels.png', savefig_kwargs=SFKWARGS)
def test_GTC_sigmaContourLevels():
    # Contours drawn at sigma levels.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         sigmaContourLevels=True, smoothingKernel=0)


@image_comp(filename='nBins.png', savefig_kwargs=SFKWARGS)
def test_GTC_nBins():
    # Coarser histogram binning.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         nBins=20, smoothingKernel=0)


@image_comp(filename='smoothingKernel.png', savefig_kwargs=SFKWARGS)
def test_GTC_smoothingKernel():
    # Non-zero smoothing requires scipy.
    if not HAS_SCIPY:
        pytest.skip("Can't test smoothing without scipy.")
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         smoothingKernel=2)
@image_comp(filename='filledPlots.png', savefig_kwargs=SFKWARGS)
def test_GTC_filledPlots():
    # Outlines only — no filled contours/histograms.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         filledPlots=False, smoothingKernel=0)


@image_comp(filename='plotDensity.png', savefig_kwargs=SFKWARGS)
def test_GTC_plotDensity():
    # Show the underlying density shading.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         plotDensity=True, smoothingKernel=0)


@image_comp(filename='figureSize.png', savefig_kwargs=SFKWARGS)
def test_GTC_figureSize():
    # Named journal figure size.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         figureSize='APJ_page', smoothingKernel=0)


@image_comp(filename='panelSpacing.png', savefig_kwargs=SFKWARGS)
def test_GTC_panelSpacing():
    # Extra space between panels.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         panelSpacing='loose', smoothingKernel=0)


# TODO: Add a test for legendMarker
# TODO: Add a test for paramRanges
@image_comp(filename='labelRotation.png', savefig_kwargs=SFKWARGS)
def test_GTC_labelRotation():
    # Keep both axis labels horizontal.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         labelRotation=(False, False), smoothingKernel=0)


@image_comp(filename='tickShifts.png', savefig_kwargs=SFKWARGS)
def test_GTC_tickShifts():
    # Nudge the tick labels away from the axes.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         tickShifts=(0.2, 0.2), smoothingKernel=0)
@image_comp(filename='colorsOrder.png', savefig_kwargs=SFKWARGS)
def test_GTC_colorsOrder():
    # Non-default color sequence for the two chains.
    palette = ['purples', 'yellows']
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         colorsOrder=palette, smoothingKernel=0)


@image_comp(filename='do1dPlots.png', savefig_kwargs=SFKWARGS)
def test_GTC_do1dPlots():
    # Suppress the diagonal 1d histograms.
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         do1dPlots=False, smoothingKernel=0)


@image_comp(filename='doOnly1dPlot.png', savefig_kwargs=SFKWARGS)
def test_GTC_doOnly1dPlot():
    # Single-parameter chains drawn as one 1d plot; inputs must still be 2d.
    one_param = [np.array([SAMPLES_1[:, 0]]).T,
                 np.array([SAMPLES_2[:, 0]]).T]
    return pygtc.plotGTC(chains=one_param,
                         doOnly1dPlot=True, smoothingKernel=0)


@image_comp(filename='mathTextFontSet.png', savefig_kwargs=SFKWARGS)
def test_GTC_mathTextFontSet():
    # Fall back to matplotlib's default math-text font handling.
    labels = ['param name', '$B_\\mathrm{\\lambda}$', '$Q^a$', '$\\lambda$']
    return pygtc.plotGTC(chains=[SAMPLES_1, SAMPLES_2],
                         paramNames=labels, mathTextFontSet=None,
                         smoothingKernel=0)

# TODO: Could add a few more tests to deal with label font customization...
| |
# Copyright (C) 2012 Midokura Japan K.K.
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from midonetclient import api
from midonetclient import exc
from midonetclient.neutron import client as n_client
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
from webob import exc as w_exc
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import portbindings_db
from neutron.db import securitygroups_db
from neutron.extensions import external_net as ext_net
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.midonet.common import config # noqa
from neutron.plugins.midonet.common import net_util
from neutron.plugins.midonet import midonet_lib
LOG = logging.getLogger(__name__)
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
METADATA_DEFAULT_IP = "169.254.169.254/32"
OS_FLOATING_IP_RULE_KEY = 'OS_FLOATING_IP'
OS_SG_RULE_KEY = 'OS_SG_RULE_ID'
OS_TENANT_ROUTER_RULE_KEY = 'OS_TENANT_ROUTER_RULE'
PRE_ROUTING_CHAIN_NAME = "OS_PRE_ROUTING_%s"
PORT_INBOUND_CHAIN_NAME = "OS_PORT_%s_INBOUND"
PORT_OUTBOUND_CHAIN_NAME = "OS_PORT_%s_OUTBOUND"
POST_ROUTING_CHAIN_NAME = "OS_POST_ROUTING_%s"
SG_INGRESS_CHAIN_NAME = "OS_SG_%s_INGRESS"
SG_EGRESS_CHAIN_NAME = "OS_SG_%s_EGRESS"
SG_PORT_GROUP_NAME = "OS_PG_%s"
SNAT_RULE = 'SNAT'
def handle_api_error(fn):
    """Decorator translating MidoNet web/API failures into MidonetApiException."""
    @functools.wraps(fn)
    def _wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except (w_exc.HTTPException, exc.MidoApiConnectionError) as api_err:
            raise MidonetApiException(msg=api_err)
    return _wrapper
class MidonetApiException(n_exc.NeutronException):
    """Neutron-style exception wrapping MidoNet API/connection failures."""

    # %(msg)s is filled from the `msg` keyword given at raise time.
    message = _("MidoNet API error: %(msg)s")
def _get_nat_ips(type, fip):
"""Get NAT IP address information.
From the route type given, determine the source and target IP addresses
from the provided floating IP DB object.
"""
if type == 'pre-routing':
return fip["floating_ip_address"], fip["fixed_ip_address"]
elif type == 'post-routing':
return fip["fixed_ip_address"], fip["floating_ip_address"]
else:
raise ValueError(_("Invalid nat_type %s") % type)
def _nat_chain_names(router_id):
    """Get the chain names for NAT.

    These names are used to associate MidoNet chains to the NAT rules
    applied to the router. For each of these, there are two NAT types,
    'dnat' and 'snat' that are returned as keys, and the corresponding
    chain names as their values.
    """
    return {
        'pre-routing': PRE_ROUTING_CHAIN_NAME % router_id,
        'post-routing': POST_ROUTING_CHAIN_NAME % router_id,
    }
def _sg_chain_names(sg_id):
    """Get the chain names for security group.

    These names are used to associate a security group to MidoNet chains.
    There are two names for ingress and egress security group directions.
    """
    return {'ingress': SG_INGRESS_CHAIN_NAME % sg_id,
            'egress': SG_EGRESS_CHAIN_NAME % sg_id}
def _port_chain_names(port_id):
    """Get the chain names for a port.

    These are chains to hold security group chains.
    """
    return {'inbound': PORT_INBOUND_CHAIN_NAME % port_id,
            'outbound': PORT_OUTBOUND_CHAIN_NAME % port_id}
def _sg_port_group_name(sg_id):
    """Get the port group name for a security group.

    This name is used to associate a security group to MidoNet port groups.
    """
    return SG_PORT_GROUP_NAME % sg_id
def _rule_direction(sg_direction):
"""Convert the SG direction to MidoNet direction
MidoNet terms them 'inbound' and 'outbound' instead of 'ingress' and
'egress'. Also, the direction is reversed since MidoNet sees it
from the network port's point of view, not the VM's.
"""
if sg_direction == 'ingress':
return 'outbound'
elif sg_direction == 'egress':
return 'inbound'
else:
raise ValueError(_("Unrecognized direction %s") % sg_direction)
def _is_router_interface_port(port):
    """Check whether the given port is a router interface port."""
    return port['device_owner'] in l3_db.DEVICE_OWNER_ROUTER_INTF
def _is_router_gw_port(port):
    """Check whether the given port is a router gateway port."""
    return port['device_owner'] in l3_db.DEVICE_OWNER_ROUTER_GW
def _is_vif_port(port):
    """Check whether the given port is a standard VIF port."""
    # A VIF port is anything that is neither DHCP nor router-owned.
    if _is_dhcp_port(port):
        return False
    return port['device_owner'] not in (l3_db.DEVICE_OWNER_ROUTER_GW,
                                        l3_db.DEVICE_OWNER_ROUTER_INTF)
def _is_dhcp_port(port):
    """Check whether the given port is a DHCP port."""
    return port['device_owner'].startswith(constants.DEVICE_OWNER_DHCP)
def _check_resource_exists(func, id, name, raise_exc=False):
    """Check whether the given resource exists in MidoNet data store.

    :param func: getter that takes the resource ID and raises
        midonet_lib.MidonetResourceNotFound when the resource is absent
    :param id: resource ID to look up (note: shadows the `id` builtin)
    :param name: human-readable resource name, used only in the log message
    :param raise_exc: when True a miss is re-raised as
        MidonetPluginException; otherwise it is only logged
    """
    try:
        func(id)
    except midonet_lib.MidonetResourceNotFound as exc:
        LOG.error(_("There is no %(name)s with ID %(id)s in MidoNet."),
                  {"name": name, "id": id})
        if raise_exc:
            raise MidonetPluginException(msg=exc)
class MidonetPluginException(n_exc.NeutronException):
    """Generic plugin-level error; the full text comes from the `msg` kwarg."""

    message = _("%(msg)s")
class MidonetPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
portbindings_db.PortBindingMixin,
external_net_db.External_net_db_mixin,
l3_db.L3_NAT_db_mixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
securitygroups_db.SecurityGroupDbMixin):
supported_extension_aliases = ['external-net', 'router', 'security-group',
'agent', 'dhcp_agent_scheduler', 'binding',
'quotas']
__native_bulk_support = False
    def __init__(self):
        """Read MIDONET config, create both API clients, and set up RPC.

        Raises MidonetPluginException when provider_router_id is not
        configured, since the plugin cannot operate without it.
        """
        super(MidonetPluginV2, self).__init__()
        # Read config values
        midonet_conf = cfg.CONF.MIDONET
        midonet_uri = midonet_conf.midonet_uri
        admin_user = midonet_conf.username
        admin_pass = midonet_conf.password
        admin_project_id = midonet_conf.project_id
        self.provider_router_id = midonet_conf.provider_router_id
        # Lazily fetched by _get_provider_router().
        self.provider_router = None
        # Two clients: the newer neutron-specific client and the classic API.
        self.api_cli = n_client.MidonetClient(midonet_conf.midonet_uri,
                                              midonet_conf.username,
                                              midonet_conf.password,
                                              project_id=midonet_conf.project_id)
        self.mido_api = api.MidonetApi(midonet_uri, admin_user,
                                       admin_pass,
                                       project_id=admin_project_id)
        self.client = midonet_lib.MidoClient(self.mido_api)
        # self.provider_router_id should have been set.
        if self.provider_router_id is None:
            msg = _('provider_router_id should be configured in the plugin '
                    'config file')
            LOG.exception(msg)
            raise MidonetPluginException(msg=msg)
        self.setup_rpc()
        self.base_binding_dict = {
            portbindings.VIF_TYPE: portbindings.VIF_TYPE_MIDONET,
            portbindings.VIF_DETAILS: {
                # TODO(rkukura): Replace with new VIF security details
                portbindings.CAP_PORT_FILTER:
                'security-group' in self.supported_extension_aliases}}
def _get_provider_router(self):
if self.provider_router is None:
self.provider_router = self.client.get_router(
self.provider_router_id)
return self.provider_router
def _dhcp_mappings(self, context, fixed_ips, mac):
for fixed_ip in fixed_ips:
subnet = self._get_subnet(context, fixed_ip["subnet_id"])
if subnet["ip_version"] == 6:
# TODO(ryu) handle IPv6
continue
if not subnet["enable_dhcp"]:
# Skip if DHCP is disabled
continue
yield subnet['cidr'], fixed_ip["ip_address"], mac
def _metadata_subnets(self, context, fixed_ips):
for fixed_ip in fixed_ips:
subnet = self._get_subnet(context, fixed_ip["subnet_id"])
if subnet["ip_version"] == 6:
continue
yield subnet['cidr'], fixed_ip["ip_address"]
    def _initialize_port_chains(self, port, in_chain, out_chain, sg_ids):
        """Populate the per-port inbound/outbound rule chains.

        Rule order (the `position` argument) is significant: anti-spoofing
        rules must precede the conntrack accept on ingress, and the SG jump
        rules must precede the return-flow accept and the fallback drop on
        egress.
        """
        tenant_id = port["tenant_id"]
        position = 1
        # mac spoofing protection
        self._add_chain_rule(in_chain, action='drop',
                             dl_src=port["mac_address"], inv_dl_src=True,
                             position=position)
        # ip spoofing protection
        for fixed_ip in port["fixed_ips"]:
            position += 1
            self._add_chain_rule(in_chain, action="drop",
                                 src_addr=fixed_ip["ip_address"] + "/32",
                                 inv_nw_src=True, dl_type=0x0800,  # IPv4
                                 position=position)
        # conntrack
        position += 1
        self._add_chain_rule(in_chain, action='accept',
                             match_forward_flow=True,
                             position=position)
        # Reset the position to process egress
        position = 1
        # Add rule for SGs
        if sg_ids:
            for sg_id in sg_ids:
                chain_name = _sg_chain_names(sg_id)["ingress"]
                chain = self.client.get_chain_by_name(tenant_id, chain_name)
                self._add_chain_rule(out_chain, action='jump',
                                     jump_chain_id=chain.get_id(),
                                     jump_chain_name=chain_name,
                                     position=position)
                position += 1
        # add reverse flow matching at the end
        self._add_chain_rule(out_chain, action='accept',
                             match_return_flow=True,
                             position=position)
        position += 1
        # fall back DROP rule at the end except for ARP
        self._add_chain_rule(out_chain, action='drop',
                             dl_type=0x0806,  # ARP
                             inv_dl_type=True, position=position)
def _bind_port_to_sgs(self, context, port, sg_ids):
self._process_port_create_security_group(context, port, sg_ids)
if sg_ids is not None:
for sg_id in sg_ids:
pg_name = _sg_port_group_name(sg_id)
self.client.add_port_to_port_group_by_name(
port["tenant_id"], pg_name, port["id"])
    def _unbind_port_from_sgs(self, context, port_id):
        """Remove the port's SG bindings in Neutron and MidoNet port groups."""
        self._delete_port_security_group_bindings(context, port_id)
        self.client.remove_port_from_port_groups(port_id)
    def _create_accept_chain_rule(self, context, sg_rule, chain=None):
        """Create the MidoNet 'accept' rule matching a Neutron SG rule.

        :param sg_rule: Neutron security group rule dict
        :param chain: MidoNet chain to add the rule to; looked up by the
            rule's direction and SG ID when not provided
        :returns: the created MidoNet chain rule
        """
        direction = sg_rule["direction"]
        tenant_id = sg_rule["tenant_id"]
        sg_id = sg_rule["security_group_id"]
        chain_name = _sg_chain_names(sg_id)[direction]
        if chain is None:
            chain = self.client.get_chain_by_name(tenant_id, chain_name)
        pg_id = None
        if sg_rule["remote_group_id"] is not None:
            pg_name = _sg_port_group_name(sg_id)
            pg = self.client.get_port_group_by_name(tenant_id, pg_name)
            pg_id = pg.get_id()
        # Tag the rule with the Neutron rule ID so it can be removed later.
        props = {OS_SG_RULE_KEY: str(sg_rule["id"])}
        # Determine source or destination address by looking at direction
        src_pg_id = dst_pg_id = None
        src_addr = dst_addr = None
        src_port_to = dst_port_to = None
        src_port_from = dst_port_from = None
        if direction == "egress":
            dst_pg_id = pg_id
            dst_addr = sg_rule["remote_ip_prefix"]
            dst_port_from = sg_rule["port_range_min"]
            dst_port_to = sg_rule["port_range_max"]
        else:
            src_pg_id = pg_id
            src_addr = sg_rule["remote_ip_prefix"]
            src_port_from = sg_rule["port_range_min"]
            src_port_to = sg_rule["port_range_max"]
        return self._add_chain_rule(
            chain, action='accept', port_group_src=src_pg_id,
            port_group_dst=dst_pg_id,
            src_addr=src_addr, src_port_from=src_port_from,
            src_port_to=src_port_to,
            dst_addr=dst_addr, dst_port_from=dst_port_from,
            dst_port_to=dst_port_to,
            nw_proto=net_util.get_protocol_value(sg_rule["protocol"]),
            dl_type=net_util.get_ethertype_value(sg_rule["ethertype"]),
            properties=props)
    def _remove_nat_rules(self, context, fip):
        """Tear down the NAT rules and static route for a floating IP."""
        router = self.client.get_router(fip["router_id"])
        self.client.remove_static_route(self._get_provider_router(),
                                        fip["floating_ip_address"])
        chain_names = _nat_chain_names(router.get_id())
        # Python 2 idiom (iteritems), consistent with the rest of this file.
        for _type, name in chain_names.iteritems():
            self.client.remove_rules_by_property(
                router.get_tenant_id(), name,
                OS_FLOATING_IP_RULE_KEY, fip["id"])
    def setup_rpc(self):
        """Register the DHCP and agent RPC endpoints and start consuming."""
        # RPC support
        self.topic = topics.PLUGIN
        self.conn = n_rpc.create_connection(new=True)
        self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
                          agents_db.AgentExtRpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        # Consume from all consumers in threads
        self.conn.consume_in_threads()
    def create_subnet(self, context, subnet):
        """Create Neutron subnet.

        Creates a Neutron subnet and a DHCP entry in MidoNet bridge.
        For external networks the backing bridge is also linked to the
        provider router.
        """
        LOG.debug(_("MidonetPluginV2.create_subnet called: subnet=%r"), subnet)
        s = subnet["subnet"]
        net = super(MidonetPluginV2, self).get_network(
            context, subnet['subnet']['network_id'], fields=None)
        session = context.session
        with session.begin(subtransactions=True):
            sn_entry = super(MidonetPluginV2, self).create_subnet(context,
                                                                  subnet)
            bridge = self.client.get_bridge(sn_entry['network_id'])
            gateway_ip = s['gateway_ip']
            cidr = s['cidr']
            if s['enable_dhcp']:
                dns_nameservers = None
                host_routes = None
                # ATTR_NOT_SPECIFIED is a sentinel, not a usable value.
                if s['dns_nameservers'] is not attributes.ATTR_NOT_SPECIFIED:
                    dns_nameservers = s['dns_nameservers']
                if s['host_routes'] is not attributes.ATTR_NOT_SPECIFIED:
                    host_routes = s['host_routes']
                self.client.create_dhcp(bridge, gateway_ip, cidr,
                                        host_rts=host_routes,
                                        dns_servers=dns_nameservers)
            # For external network, link the bridge to the provider router.
            if net['router:external']:
                self._link_bridge_to_gw_router(
                    bridge, self._get_provider_router(), gateway_ip, cidr)
        LOG.debug(_("MidonetPluginV2.create_subnet exiting: sn_entry=%r"),
                  sn_entry)
        return sn_entry
    def delete_subnet(self, context, id):
        """Delete Neutron subnet.

        Delete neutron network and its corresponding MidoNet bridge.
        DHCP configs and, for external networks, provider-router links are
        cleaned up as well.
        """
        LOG.debug(_("MidonetPluginV2.delete_subnet called: id=%s"), id)
        subnet = super(MidonetPluginV2, self).get_subnet(context, id,
                                                         fields=None)
        net = super(MidonetPluginV2, self).get_network(context,
                                                       subnet['network_id'],
                                                       fields=None)
        session = context.session
        with session.begin(subtransactions=True):
            super(MidonetPluginV2, self).delete_subnet(context, id)
            bridge = self.client.get_bridge(subnet['network_id'])
            if subnet['enable_dhcp']:
                self.client.delete_dhcp(bridge, subnet['cidr'])
            # If the network is external, clean up routes, links, ports
            if net[ext_net.EXTERNAL]:
                self._unlink_bridge_from_gw_router(
                    bridge, self._get_provider_router())
        LOG.debug(_("MidonetPluginV2.delete_subnet exiting"))
    @handle_api_error
    def create_network(self, context, network):
        """Create Neutron network.

        Create a new Neutron network and its corresponding MidoNet bridge.
        """
        LOG.debug('MidonetPluginV2.create_network called: network=%r',
                  network)
        net_data = network['network']
        tenant_id = self._get_tenant_id_for_create(context, net_data)
        net_data['tenant_id'] = tenant_id
        self._ensure_default_security_group(context, tenant_id)
        with context.session.begin(subtransactions=True):
            net = super(MidonetPluginV2, self).create_network(context, network)
            self._process_l3_create(context, net, net_data)
            # Create the MidoNet side inside the transaction so a backend
            # failure rolls back the Neutron record too.
            self.api_cli.create_network(net)
        LOG.debug("MidonetPluginV2.create_network exiting: net=%r", net)
        return net
    @handle_api_error
    def update_network(self, context, id, network):
        """Update Neutron network.

        Update an existing Neutron network and its corresponding MidoNet
        bridge.
        """
        LOG.debug("MidonetPluginV2.update_network called: id=%(id)r, "
                  "network=%(network)r", {'id': id, 'network': network})
        with context.session.begin(subtransactions=True):
            net = super(MidonetPluginV2, self).update_network(
                context, id, network)
            self._process_l3_update(context, net, network['network'])
            self.api_cli.update_network(id, net)
        LOG.debug("MidonetPluginV2.update_network exiting: net=%r", net)
        return net
    @handle_api_error
    def delete_network(self, context, id):
        """Delete a network and its corresponding MidoNet bridge."""
        LOG.debug("MidonetPluginV2.delete_network called: id=%r", id)
        with context.session.begin(subtransactions=True):
            self._process_l3_delete(context, id)
            super(MidonetPluginV2, self).delete_network(context, id)
            self.api_cli.delete_network(id)
        LOG.debug("MidonetPluginV2.delete_network exiting: id=%r", id)
    def create_port(self, context, port):
        """Create a L2 port in Neutron/MidoNet.

        The MidoNet bridge port is created first and its ID is reused as the
        Neutron port ID; on any failure the bridge port is deleted before
        the exception is re-raised.
        """
        LOG.debug(_("MidonetPluginV2.create_port called: port=%r"), port)
        port_data = port['port']
        # Create a bridge port in MidoNet and set the bridge port ID as the
        # port ID in Neutron.
        bridge = self.client.get_bridge(port_data["network_id"])
        tenant_id = bridge.get_tenant_id()
        asu = port_data.get("admin_state_up", True)
        bridge_port = self.client.add_bridge_port(bridge,
                                                  admin_state_up=asu)
        port_data["id"] = bridge_port.get_id()
        try:
            session = context.session
            with session.begin(subtransactions=True):
                # Create a Neutron port
                new_port = super(MidonetPluginV2, self).create_port(context,
                                                                    port)
                port_data.update(new_port)
                self._ensure_default_security_group_on_port(context,
                                                            port)
                if _is_vif_port(port_data):
                    # Bind security groups to the port
                    sg_ids = self._get_security_groups_on_port(context, port)
                    self._bind_port_to_sgs(context, new_port, sg_ids)
                    # Create port chains
                    port_chains = {}
                    # Python 2 idiom (iteritems), consistent with this file.
                    for d, name in _port_chain_names(
                            new_port["id"]).iteritems():
                        port_chains[d] = self.client.create_chain(tenant_id,
                                                                  name)
                    self._initialize_port_chains(port_data,
                                                 port_chains['inbound'],
                                                 port_chains['outbound'],
                                                 sg_ids)
                    # Update the port with the chain
                    self.client.update_port_chains(
                        bridge_port, port_chains["inbound"].get_id(),
                        port_chains["outbound"].get_id())
                    # DHCP mapping is only for VIF ports
                    for cidr, ip, mac in self._dhcp_mappings(
                            context, port_data["fixed_ips"],
                            port_data["mac_address"]):
                        self.client.add_dhcp_host(bridge, cidr, ip, mac)
                elif _is_dhcp_port(port_data):
                    # For DHCP port, add a metadata route
                    for cidr, ip in self._metadata_subnets(
                            context, port_data["fixed_ips"]):
                        self.client.add_dhcp_route_option(bridge, cidr, ip,
                                                          METADATA_DEFAULT_IP)
                self._process_portbindings_create_and_update(context,
                                                             port_data,
                                                             new_port)
        except Exception as ex:
            # Try removing the MidoNet port before raising an exception.
            with excutils.save_and_reraise_exception():
                LOG.error(_("Failed to create a port on network %(net_id)s: "
                            "%(err)s"),
                          {"net_id": port_data["network_id"], "err": ex})
                self.client.delete_port(bridge_port.get_id())
        LOG.debug(_("MidonetPluginV2.create_port exiting: port=%r"), new_port)
        return new_port
def get_port(self, context, id, fields=None):
"""Retrieve port."""
LOG.debug(_("MidonetPluginV2.get_port called: id=%(id)s "
"fields=%(fields)r"), {'id': id, 'fields': fields})
port = super(MidonetPluginV2, self).get_port(context, id, fields)
"Check if the port exists in MidoNet DB"""
try:
self.client.get_port(id)
except midonet_lib.MidonetResourceNotFound as exc:
LOG.error(_("There is no port with ID %(id)s in MidoNet."),
{"id": id})
port['status'] = constants.PORT_STATUS_ERROR
raise exc
LOG.debug(_("MidonetPluginV2.get_port exiting: port=%r"), port)
return port
    def get_ports(self, context, filters=None, fields=None):
        """List neutron ports.

        NOTE(review): despite the original docstring's claim, no MidoNet
        existence check is performed here; ports come straight from the
        Neutron DB (compare with get_port, which does verify).
        """
        LOG.debug(_("MidonetPluginV2.get_ports called: filters=%(filters)s "
                    "fields=%(fields)r"),
                  {'filters': filters, 'fields': fields})
        ports = super(MidonetPluginV2, self).get_ports(context, filters,
                                                       fields)
        return ports
    def delete_port(self, context, id, l3_port_check=True):
        """Delete a neutron port and corresponding MidoNet bridge port.

        :param l3_port_check: when True, refuse to delete ports owned by an
            L3 router.
        """
        LOG.debug(_("MidonetPluginV2.delete_port called: id=%(id)s "
                    "l3_port_check=%(l3_port_check)r"),
                  {'id': id, 'l3_port_check': l3_port_check})
        # if needed, check to see if this is a port owned by
        # and l3-router. If so, we should prevent deletion.
        if l3_port_check:
            self.prevent_l3_port_deletion(context, id)
        self.disassociate_floatingips(context, id)
        port = self.get_port(context, id)
        device_id = port['device_id']
        # If this port is for router interface/gw, unlink and delete.
        if _is_router_interface_port(port):
            self._unlink_bridge_from_router(device_id, id)
        elif _is_router_gw_port(port):
            # Gateway removed
            # Remove all the SNAT rules that are tagged.
            router = self._get_router(context, device_id)
            tenant_id = router["tenant_id"]
            chain_names = _nat_chain_names(device_id)
            # Python 2 idiom (iteritems), consistent with this file.
            for _type, name in chain_names.iteritems():
                self.client.remove_rules_by_property(
                    tenant_id, name, OS_TENANT_ROUTER_RULE_KEY,
                    SNAT_RULE)
            # Remove the default routes and unlink
            self._remove_router_gateway(port['device_id'])
        self.client.delete_port(id, delete_chains=True)
        try:
            for cidr, ip, mac in self._dhcp_mappings(
                    context, port["fixed_ips"], port["mac_address"]):
                self.client.delete_dhcp_host(port["network_id"], cidr, ip,
                                             mac)
        except Exception:
            # Best-effort: a stale DHCP entry is logged but does not abort
            # the port deletion.
            LOG.error(_("Failed to delete DHCP mapping for port %(id)s"),
                      {"id": id})
        super(MidonetPluginV2, self).delete_port(context, id)
    def update_port(self, context, id, port):
        """Handle port update, including security groups and fixed IPs."""
        with context.session.begin(subtransactions=True):
            # Get the port and save the fixed IPs
            old_port = self._get_port(context, id)
            net_id = old_port["network_id"]
            mac = old_port["mac_address"]
            old_ips = old_port["fixed_ips"]
            # update the port DB
            p = super(MidonetPluginV2, self).update_port(context, id, port)
            if "admin_state_up" in port["port"]:
                asu = port["port"]["admin_state_up"]
                mido_port = self.client.update_port(id, admin_state_up=asu)
                # If we're changing the admin_state_up flag and the port is
                # associated with a router, then we also need to update the
                # peer port.
                if _is_router_interface_port(p):
                    self.client.update_port(mido_port.get_peer_id(),
                                            admin_state_up=asu)
            new_ips = p["fixed_ips"]
            if new_ips:
                bridge = self.client.get_bridge(net_id)
                # If it's a DHCP port, add a route to reach the MD server
                if _is_dhcp_port(p):
                    for cidr, ip in self._metadata_subnets(
                            context, new_ips):
                        self.client.add_dhcp_route_option(
                            bridge, cidr, ip, METADATA_DEFAULT_IP)
                else:
                    # IPs have changed. Re-map the DHCP entries
                    # NOTE(review): the loop target rebinds `mac`, shadowing
                    # the saved value above; it is harmless only because
                    # _dhcp_mappings yields back the same mac it was given.
                    for cidr, ip, mac in self._dhcp_mappings(
                            context, old_ips, mac):
                        self.client.remove_dhcp_host(
                            bridge, cidr, ip, mac)
                    for cidr, ip, mac in self._dhcp_mappings(
                            context, new_ips, mac):
                        self.client.add_dhcp_host(
                            bridge, cidr, ip, mac)
            # Re-bind SGs only when the update adds or removes groups.
            if (self._check_update_deletes_security_groups(port) or
                    self._check_update_has_security_groups(port)):
                self._unbind_port_from_sgs(context, p["id"])
                sg_ids = self._get_security_groups_on_port(context, port)
                self._bind_port_to_sgs(context, p, sg_ids)
            self._process_portbindings_create_and_update(context,
                                                         port['port'],
                                                         p)
        return p
    def create_router(self, context, router):
        """Handle router creation.

        When a new Neutron router is created, its corresponding MidoNet
        router is also created. In MidoNet, this router is initialized with
        chains for inbound and outbound traffic, which will be used to hold
        other chains that include various rules, such as NAT.

        :param router: Router information provided to create a new router.
        """
        # NOTE(dcahill): Similar to the NSX plugin, we completely override
        # this method in order to be able to use the MidoNet ID as Neutron ID
        # TODO(dcahill): Propose upstream patch for allowing
        # 3rd parties to specify IDs as we do with l2 plugin
        LOG.debug(_("MidonetPluginV2.create_router called: router=%(router)s"),
                  {"router": router})
        r = router['router']
        tenant_id = self._get_tenant_id_for_create(context, r)
        r['tenant_id'] = tenant_id
        # The MidoNet router is created first so its ID can seed the
        # Neutron DB row.
        mido_router = self.client.create_router(**r)
        mido_router_id = mido_router.get_id()
        try:
            has_gw_info = False
            if EXTERNAL_GW_INFO in r:
                has_gw_info = True
                gw_info = r.pop(EXTERNAL_GW_INFO)
            with context.session.begin(subtransactions=True):
                # pre-generate id so it will be available when
                # configuring external gw port
                router_db = l3_db.Router(id=mido_router_id,
                                         tenant_id=tenant_id,
                                         name=r['name'],
                                         admin_state_up=r['admin_state_up'],
                                         status="ACTIVE")
                context.session.add(router_db)
                if has_gw_info:
                    self._update_router_gw_info(context, router_db['id'],
                                                gw_info)
                router_data = self._make_router_dict(router_db)
        except Exception:
            # Try removing the midonet router
            with excutils.save_and_reraise_exception():
                self.client.delete_router(mido_router_id)
        # Create router chains
        chain_names = _nat_chain_names(mido_router_id)
        try:
            self.client.add_router_chains(mido_router,
                                          chain_names["pre-routing"],
                                          chain_names["post-routing"])
        except Exception:
            # Set the router status to Error; the router itself is kept.
            with context.session.begin(subtransactions=True):
                r = self._get_router(context, router_data["id"])
                router_data['status'] = constants.NET_STATUS_ERROR
                r['status'] = router_data['status']
                context.session.add(r)
        LOG.debug(_("MidonetPluginV2.create_router exiting: "
                    "router_data=%(router_data)s."),
                  {"router_data": router_data})
        return router_data
def _set_router_gateway(self, id, gw_router, gw_ip):
"""Set router uplink gateway
:param ID: ID of the router
:param gw_router: gateway router to link to
:param gw_ip: gateway IP address
"""
LOG.debug(_("MidonetPluginV2.set_router_gateway called: id=%(id)s, "
"gw_router=%(gw_router)s, gw_ip=%(gw_ip)s"),
{'id': id, 'gw_router': gw_router, 'gw_ip': gw_ip}),
router = self.client.get_router(id)
# Create a port in the gw router
gw_port = self.client.add_router_port(gw_router,
port_address='169.254.255.1',
network_address='169.254.255.0',
network_length=30)
# Create a port in the router
port = self.client.add_router_port(router,
port_address='169.254.255.2',
network_address='169.254.255.0',
network_length=30)
# Link them
self.client.link(gw_port, port.get_id())
# Add a route for gw_ip to bring it down to the router
self.client.add_router_route(gw_router, type='Normal',
src_network_addr='0.0.0.0',
src_network_length=0,
dst_network_addr=gw_ip,
dst_network_length=32,
next_hop_port=gw_port.get_id(),
weight=100)
# Add default route to uplink in the router
self.client.add_router_route(router, type='Normal',
src_network_addr='0.0.0.0',
src_network_length=0,
dst_network_addr='0.0.0.0',
dst_network_length=0,
next_hop_port=port.get_id(),
weight=100)
    def _remove_router_gateway(self, id):
        """Clear router gateway.

        Unlinks the /30 uplink port pair created by _set_router_gateway and
        removes the default route.

        :param id: ID of the router
        """
        LOG.debug(_("MidonetPluginV2.remove_router_gateway called: "
                    "id=%(id)s"), {'id': id})
        router = self.client.get_router(id)
        # delete the port that is connected to the gateway router
        # ('169.254.255.2' is the fixed uplink address set on creation)
        for p in router.get_ports():
            if p.get_port_address() == '169.254.255.2':
                peer_port_id = p.get_peer_id()
                if peer_port_id is not None:
                    self.client.unlink(p)
                    self.client.delete_port(peer_port_id)
        # delete default route
        for r in router.get_routes():
            if (r.get_dst_network_addr() == '0.0.0.0' and
                    r.get_dst_network_length() == 0):
                self.client.delete_route(r.get_id())
    def update_router(self, context, id, router):
        """Handle router updates, including gateway set-up."""
        LOG.debug(_("MidonetPluginV2.update_router called: id=%(id)s "
                    "router=%(router)r"), {"id": id, "router": router})
        router_data = router["router"]
        # Check if the update included changes to the gateway.
        gw_updated = l3_db.EXTERNAL_GW_INFO in router_data
        with context.session.begin(subtransactions=True):
            # Update the Neutron DB
            r = super(MidonetPluginV2, self).update_router(context, id,
                                                           router)
            tenant_id = r["tenant_id"]
            if gw_updated:
                if (l3_db.EXTERNAL_GW_INFO in r and
                        r[l3_db.EXTERNAL_GW_INFO] is not None):
                    # Gateway created
                    gw_port_neutron = self._get_port(
                        context.elevated(), r["gw_port_id"])
                    gw_ip = gw_port_neutron['fixed_ips'][0]['ip_address']
                    # First link routers and set up the routes
                    self._set_router_gateway(r["id"],
                                             self._get_provider_router(),
                                             gw_ip)
                    gw_port_midonet = self.client.get_link_port(
                        self._get_provider_router(), r["id"])
                    # Get the NAT chains and add dynamic SNAT rules.
                    chain_names = _nat_chain_names(r["id"])
                    props = {OS_TENANT_ROUTER_RULE_KEY: SNAT_RULE}
                    self.client.add_dynamic_snat(tenant_id,
                                                 chain_names['pre-routing'],
                                                 chain_names['post-routing'],
                                                 gw_ip,
                                                 gw_port_midonet.get_id(),
                                                 **props)
            self.client.update_router(id, **router_data)
        LOG.debug(_("MidonetPluginV2.update_router exiting: router=%r"), r)
        return r
    def delete_router(self, context, id):
        """Handler for router deletion.

        Deleting a router on Neutron simply means deleting its corresponding
        router in MidoNet.

        :param id: router ID to remove
        """
        LOG.debug(_("MidonetPluginV2.delete_router called: id=%s"), id)
        # Chains must go first; then the router itself, then the DB row.
        self.client.delete_router_chains(id)
        self.client.delete_router(id)
        super(MidonetPluginV2, self).delete_router(context, id)
    def _link_bridge_to_gw_router(self, bridge, gw_router, gw_ip, cidr):
        """Link a bridge to the gateway router.

        :param bridge: bridge
        :param gw_router: gateway router to link to
        :param gw_ip: IP address of gateway
        :param cidr: network CIDR
        """
        net_addr, net_len = net_util.net_addr(cidr)
        # create a port on the gateway router
        gw_port = self.client.add_router_port(gw_router, port_address=gw_ip,
                                              network_address=net_addr,
                                              network_length=net_len)
        # create a bridge port, then link it to the router.
        port = self.client.add_bridge_port(bridge)
        self.client.link(gw_port, port.get_id())
        # add a route for the subnet in the gateway router
        self.client.add_router_route(gw_router, type='Normal',
                                     src_network_addr='0.0.0.0',
                                     src_network_length=0,
                                     dst_network_addr=net_addr,
                                     dst_network_length=net_len,
                                     next_hop_port=gw_port.get_id(),
                                     weight=100)
    def _unlink_bridge_from_gw_router(self, bridge, gw_router):
        """Unlink a bridge from the gateway router.

        :param bridge: bridge to unlink
        :param gw_router: gateway router to unlink from
        """
        # Delete routes and unlink the router and the bridge.
        routes = self.client.get_router_routes(gw_router.get_id())
        # Collect the bridge-side peer ports before unlinking; they are
        # deleted last.
        bridge_ports_to_delete = [
            p for p in gw_router.get_peer_ports()
            if p.get_device_id() == bridge.get_id()]
        for p in bridge.get_peer_ports():
            if p.get_device_id() == gw_router.get_id():
                # delete the routes going to the bridge
                for r in routes:
                    if r.get_next_hop_port() == p.get_id():
                        self.client.delete_route(r.get_id())
                self.client.unlink(p)
                self.client.delete_port(p.get_id())
        # delete bridge port
        for port in bridge_ports_to_delete:
            self.client.delete_port(port.get_id())
    def _link_bridge_to_router(self, router, bridge_port, net_addr, net_len,
                               gw_ip, metadata_gw_ip):
        """Link a tenant bridge port to a router and install routes.

        :param metadata_gw_ip: DHCP port IP used as next hop for the
            metadata-server route; no such route is added when falsy.
        """
        router_port = self.client.add_router_port(
            router, network_length=net_len, network_address=net_addr,
            port_address=gw_ip, admin_state_up=bridge_port['admin_state_up'])
        self.client.link(router_port, bridge_port['id'])
        self.client.add_router_route(router, type='Normal',
                                     src_network_addr='0.0.0.0',
                                     src_network_length=0,
                                     dst_network_addr=net_addr,
                                     dst_network_length=net_len,
                                     next_hop_port=router_port.get_id(),
                                     weight=100)
        if metadata_gw_ip:
            # Add a route for the metadata server.
            # Not all VM images supports DHCP option 121. Add a route for the
            # Metadata server in the router to forward the packet to the bridge
            # that will send them to the Metadata Proxy.
            md_net_addr, md_net_len = net_util.net_addr(METADATA_DEFAULT_IP)
            self.client.add_router_route(
                router, type='Normal', src_network_addr=net_addr,
                src_network_length=net_len,
                dst_network_addr=md_net_addr,
                dst_network_length=md_net_len,
                next_hop_port=router_port.get_id(),
                next_hop_gateway=metadata_gw_ip)
    def _unlink_bridge_from_router(self, router_id, bridge_port_id):
        """Unlink a bridge from a router."""
        # Remove the routes to the port and unlink the port
        bridge_port = self.client.get_port(bridge_port_id)
        routes = self.client.get_router_routes(router_id)
        self.client.delete_port_routes(routes, bridge_port.get_peer_id())
        self.client.unlink(bridge_port)
    def add_router_interface(self, context, router_id, interface_info):
        """Handle router linking with network.

        On MidoNet failure the Neutron-side interface is rolled back via
        remove_router_interface before re-raising.
        """
        LOG.debug(_("MidonetPluginV2.add_router_interface called: "
                    "router_id=%(router_id)s "
                    "interface_info=%(interface_info)r"),
                  {'router_id': router_id, 'interface_info': interface_info})
        with context.session.begin(subtransactions=True):
            info = super(MidonetPluginV2, self).add_router_interface(
                context, router_id, interface_info)
        try:
            subnet = self._get_subnet(context, info["subnet_id"])
            cidr = subnet["cidr"]
            net_addr, net_len = net_util.net_addr(cidr)
            router = self.client.get_router(router_id)
            # Get the metadata GW IP
            metadata_gw_ip = None
            rport_qry = context.session.query(models_v2.Port)
            dhcp_ports = rport_qry.filter_by(
                network_id=subnet["network_id"],
                device_owner=constants.DEVICE_OWNER_DHCP).all()
            if dhcp_ports and dhcp_ports[0].fixed_ips:
                metadata_gw_ip = dhcp_ports[0].fixed_ips[0].ip_address
            else:
                LOG.warn(_("DHCP agent is not working correctly. No port "
                           "to reach the Metadata server on this network"))
            # Link the router and the bridge
            port = super(MidonetPluginV2, self).get_port(context,
                                                         info["port_id"])
            self._link_bridge_to_router(router, port, net_addr,
                                        net_len, subnet["gateway_ip"],
                                        metadata_gw_ip)
        except Exception:
            LOG.error(_("Failed to create MidoNet resources to add router "
                        "interface. info=%(info)s, router_id=%(router_id)s"),
                      {"info": info, "router_id": router_id})
            with excutils.save_and_reraise_exception():
                with context.session.begin(subtransactions=True):
                    self.remove_router_interface(context, router_id, info)
        LOG.debug(_("MidonetPluginV2.add_router_interface exiting: "
                    "info=%r"), info)
        return info
    def _assoc_fip(self, fip):
        """Wire up a floating IP: provider-router route plus NAT rules."""
        router = self.client.get_router(fip["router_id"])
        link_port = self.client.get_link_port(
            self._get_provider_router(), router.get_id())
        # Route the /32 floating address down to the tenant router.
        self.client.add_router_route(
            self._get_provider_router(),
            src_network_addr='0.0.0.0',
            src_network_length=0,
            dst_network_addr=fip["floating_ip_address"],
            dst_network_length=32,
            next_hop_port=link_port.get_peer_id())
        # Tag the NAT rules with the fip ID so _remove_nat_rules can find
        # them later.
        props = {OS_FLOATING_IP_RULE_KEY: fip['id']}
        tenant_id = router.get_tenant_id()
        chain_names = _nat_chain_names(router.get_id())
        for chain_type, name in chain_names.items():
            src_ip, target_ip = _get_nat_ips(chain_type, fip)
            # DNAT on the way in (pre-routing), SNAT on the way out.
            if chain_type == 'pre-routing':
                nat_type = 'dnat'
            else:
                nat_type = 'snat'
            self.client.add_static_nat(tenant_id, name, src_ip,
                                       target_ip,
                                       link_port.get_id(),
                                       nat_type, **props)
    def create_floatingip(self, context, floatingip):
        """Create a floating IP, associating it immediately when a port is
        supplied."""
        session = context.session
        with session.begin(subtransactions=True):
            fip = super(MidonetPluginV2, self).create_floatingip(
                context, floatingip)
            if fip['port_id']:
                self._assoc_fip(fip)
            return fip
    def update_floatingip(self, context, id, floatingip):
        """Handle floating IP association and disassociation."""
        LOG.debug(_("MidonetPluginV2.update_floatingip called: id=%(id)s "
                    "floatingip=%(floatingip)s "),
                  {'id': id, 'floatingip': floatingip})
        session = context.session
        with session.begin(subtransactions=True):
            # NOTE(review): if 'port_id' is present but falsy and not None
            # (e.g. ''), neither branch runs and `fip` is unbound at the
            # LOG.debug below -> NameError. Confirm upstream callers never
            # send such a value.
            if floatingip['floatingip']['port_id']:
                fip = super(MidonetPluginV2, self).update_floatingip(
                    context, id, floatingip)
                self._assoc_fip(fip)
            # disassociate floating IP
            elif floatingip['floatingip']['port_id'] is None:
                fip = super(MidonetPluginV2, self).get_floatingip(context, id)
                self._remove_nat_rules(context, fip)
                super(MidonetPluginV2, self).update_floatingip(context, id,
                                                               floatingip)
        LOG.debug(_("MidonetPluginV2.update_floating_ip exiting: fip=%s"), fip)
        return fip
    def disassociate_floatingips(self, context, port_id):
        """Disassociate floating IPs (if any) from this port."""
        try:
            fip_qry = context.session.query(l3_db.FloatingIP)
            fip_dbs = fip_qry.filter_by(fixed_port_id=port_id)
            for fip_db in fip_dbs:
                self._remove_nat_rules(context, fip_db)
        except sa_exc.NoResultFound:
            # NOTE(review): iterating a filter_by() query does not raise
            # NoResultFound (only .one() does) — this handler looks dead;
            # confirm before removing.
            pass
        super(MidonetPluginV2, self).disassociate_floatingips(context, port_id)
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        Create a new security group, including the default security group.
        In MidoNet, this means creating a pair of chains, inbound and
        outbound, as well as a new port group. On MidoNet failure the
        Neutron row is deleted before re-raising.
        """
        LOG.debug(_("MidonetPluginV2.create_security_group called: "
                    "security_group=%(security_group)s "
                    "default_sg=%(default_sg)s "),
                  {'security_group': security_group, 'default_sg': default_sg})
        sg = security_group.get('security_group')
        tenant_id = self._get_tenant_id_for_create(context, sg)
        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)
        # Create the Neutron sg first
        sg = super(MidonetPluginV2, self).create_security_group(
            context, security_group, default_sg)
        try:
            # Process the MidoNet side
            self.client.create_port_group(tenant_id,
                                          _sg_port_group_name(sg["id"]))
            chain_names = _sg_chain_names(sg["id"])
            chains = {}
            # Python 2 idiom (iteritems), consistent with this file.
            for direction, chain_name in chain_names.iteritems():
                c = self.client.create_chain(tenant_id, chain_name)
                chains[direction] = c
            # Create all the rules for this SG. Only accept rules are created
            for r in sg['security_group_rules']:
                self._create_accept_chain_rule(context, r,
                                               chain=chains[r['direction']])
        except Exception:
            LOG.error(_("Failed to create MidoNet resources for sg %(sg)r"),
                      {"sg": sg})
            with excutils.save_and_reraise_exception():
                with context.session.begin(subtransactions=True):
                    sg = self._get_security_group(context, sg["id"])
                    context.session.delete(sg)
        LOG.debug(_("MidonetPluginV2.create_security_group exiting: sg=%r"),
                  sg)
        return sg
    def delete_security_group(self, context, id):
        """Delete chains for Neutron security group.

        :raises: ext_sg.SecurityGroupNotFound, SecurityGroupCannotRemoveDefault
            or SecurityGroupInUse on the corresponding precondition failures
        """
        LOG.debug(_("MidonetPluginV2.delete_security_group called: id=%s"), id)
        with context.session.begin(subtransactions=True):
            sg = super(MidonetPluginV2, self).get_security_group(context, id)
            if not sg:
                raise ext_sg.SecurityGroupNotFound(id=id)
            if sg["name"] == 'default' and not context.is_admin:
                raise ext_sg.SecurityGroupCannotRemoveDefault()
            sg_id = sg['id']
            filters = {'security_group_id': [sg_id]}
            if super(MidonetPluginV2, self)._get_port_security_group_bindings(
                    context, filters):
                raise ext_sg.SecurityGroupInUse(id=sg_id)
            # Delete MidoNet Chains and portgroup for the SG
            tenant_id = sg['tenant_id']
            self.client.delete_chains_by_names(
                tenant_id, _sg_chain_names(sg["id"]).values())
            self.client.delete_port_group_by_name(
                tenant_id, _sg_port_group_name(sg["id"]))
            super(MidonetPluginV2, self).delete_security_group(context, id)
    def create_security_group_rule(self, context, security_group_rule):
        """Create a security group rule.

        Create a security group rule in the Neutron DB and corresponding
        MidoNet resources in its data store.
        """
        LOG.debug(_("MidonetPluginV2.create_security_group_rule called: "
                    "security_group_rule=%(security_group_rule)r"),
                  {'security_group_rule': security_group_rule})
        with context.session.begin(subtransactions=True):
            rule = super(MidonetPluginV2, self).create_security_group_rule(
                context, security_group_rule)
            self._create_accept_chain_rule(context, rule)
            LOG.debug(_("MidonetPluginV2.create_security_group_rule exiting: "
                        "rule=%r"), rule)
            return rule
    def delete_security_group_rule(self, context, sg_rule_id):
        """Delete a security group rule.

        Delete a security group rule from the Neutron DB and corresponding
        MidoNet resources from its data store.

        :raises: ext_sg.SecurityGroupRuleNotFound when the rule is missing
        """
        LOG.debug(_("MidonetPluginV2.delete_security_group_rule called: "
                    "sg_rule_id=%s"), sg_rule_id)
        with context.session.begin(subtransactions=True):
            rule = super(MidonetPluginV2, self).get_security_group_rule(
                context, sg_rule_id)
            if not rule:
                raise ext_sg.SecurityGroupRuleNotFound(id=sg_rule_id)
            sg = self._get_security_group(context,
                                          rule["security_group_id"])
            chain_name = _sg_chain_names(sg["id"])[rule["direction"]]
            # MidoNet rules are tagged with the Neutron rule ID (see
            # _create_accept_chain_rule), so remove by that property.
            self.client.remove_rules_by_property(rule["tenant_id"], chain_name,
                                                 OS_SG_RULE_KEY,
                                                 str(rule["id"]))
            super(MidonetPluginV2, self).delete_security_group_rule(
                context, sg_rule_id)
    def _add_chain_rule(self, chain, action, **kwargs):
        """Translate Neutron-style match kwargs into a MidoNet chain rule.

        Address arguments are split into (addr, length) pairs and port
        ranges are folded into tp_src/tp_dst dicts before delegating to the
        MidoNet client.
        """
        nw_proto = kwargs.get("nw_proto")
        src_addr = kwargs.pop("src_addr", None)
        dst_addr = kwargs.pop("dst_addr", None)
        src_port_from = kwargs.pop("src_port_from", None)
        src_port_to = kwargs.pop("src_port_to", None)
        dst_port_from = kwargs.pop("dst_port_from", None)
        dst_port_to = kwargs.pop("dst_port_to", None)
        # Convert to the keys and values that midonet client understands
        if src_addr:
            kwargs["nw_src_addr"], kwargs["nw_src_length"] = net_util.net_addr(
                src_addr)
        if dst_addr:
            kwargs["nw_dst_addr"], kwargs["nw_dst_length"] = net_util.net_addr(
                dst_addr)
        kwargs["tp_src"] = {"start": src_port_from, "end": src_port_to}
        kwargs["tp_dst"] = {"start": dst_port_from, "end": dst_port_to}
        if nw_proto == 1:  # ICMP
            # Overwrite port fields regardless of the direction
            # NOTE(review): for ICMP the Neutron min/max fields carry
            # type/code, hence the single-value from/from and to/to ranges;
            # presumably intentional — verify against the MidoNet API.
            kwargs["tp_src"] = {"start": src_port_from, "end": src_port_from}
            kwargs["tp_dst"] = {"start": dst_port_to, "end": dst_port_to}
        return self.client.add_chain_rule(chain, action=action, **kwargs)
| |
# Original work Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified work Copyright 2020 Yubico AB. All Rights Reserved.
# This file, with modifications, is licensed under the above Apache License.
from .base import OtpYubiKeyDevice, YUBICO_VID, USAGE_OTP
from yubikit.core.otp import OtpConnection
from ctypes import WinDLL, WinError # type: ignore
from ctypes import wintypes, LibraryLoader
import ctypes
import platform
import logging
import re
logger = logging.getLogger(__name__)
# Load relevant DLLs. Attribute access on the LibraryLoader loads and
# caches the named system DLL, so hid.dll, setupapi.dll and kernel32.dll
# are resolved here at import time (Windows only).
windll = LibraryLoader(WinDLL)
hid = windll.Hid
setupapi = windll.SetupAPI
kernel32 = windll.Kernel32
# Various structs that are used in the Windows APIs we call
class GUID(ctypes.Structure):
    """ctypes mirror of the Windows GUID type (16 bytes)."""

    _fields_ = [
        ("Data1", ctypes.c_ulong),
        ("Data2", ctypes.c_ushort),
        ("Data3", ctypes.c_ushort),
        ("Data4", ctypes.c_ubyte * 8),
    ]
# On Windows, SetupAPI.h packs structures differently in 64bit and
# 32bit mode. In 64-bit mode, the structures are packed on 8 byte
# boundaries, while in 32-bit mode, they are packed on 1 byte boundaries.
# This is important to get right for some API calls that fill out these
# structures.
_PACK_BY_ARCH = {"64bit": 8, "32bit": 1}
_arch = platform.architecture()[0]
if _arch not in _PACK_BY_ARCH:
    raise OSError(f"Unknown architecture: {platform.architecture()[0]}")
SETUPAPI_PACK = _PACK_BY_ARCH[_arch]
class DeviceInterfaceData(ctypes.Structure):
    """Fixed-size device-interface record filled in by
    SetupDiEnumDeviceInterfaces (SetupAPI's SP_DEVICE_INTERFACE_DATA)."""

    _fields_ = [
        ("cbSize", wintypes.DWORD),
        ("InterfaceClassGuid", GUID),
        ("Flags", wintypes.DWORD),
        ("Reserved", ctypes.POINTER(ctypes.c_ulong)),
    ]
    # Packed per SetupAPI conventions (see SETUPAPI_PACK above).
    _pack_ = SETUPAPI_PACK
class DeviceInterfaceDetailData(ctypes.Structure):
    """Variable-length detail record; DevicePath is declared as one byte
    but extends to the end of the caller-allocated buffer (see
    list_paths, which overlays this struct on a sized buffer)."""

    _fields_ = [("cbSize", wintypes.DWORD), ("DevicePath", ctypes.c_byte * 1)]
    # Packed per SetupAPI conventions (see SETUPAPI_PACK above).
    _pack_ = SETUPAPI_PACK
class HidAttributes(ctypes.Structure):
    """Vendor/product/version attributes returned by HidD_GetAttributes."""

    _fields_ = [
        ("Size", ctypes.c_ulong),
        ("VendorID", ctypes.c_ushort),
        ("ProductID", ctypes.c_ushort),
        ("VersionNumber", ctypes.c_ushort),
    ]
class HidCapabilities(ctypes.Structure):
    """Capability data returned by HidP_GetCaps; only Usage/UsagePage and
    the report lengths are of interest here."""

    _fields_ = [
        ("Usage", ctypes.c_ushort),
        ("UsagePage", ctypes.c_ushort),
        ("InputReportByteLength", ctypes.c_ushort),
        ("OutputReportByteLength", ctypes.c_ushort),
        ("FeatureReportByteLength", ctypes.c_ushort),
        ("Reserved", ctypes.c_ushort * 17),
        ("NotUsed", ctypes.c_ushort * 10),
    ]
# Various void* aliases for readability.
HDEVINFO = ctypes.c_void_p
HANDLE = ctypes.c_void_p
PHIDP_PREPARSED_DATA = ctypes.c_void_p  # pylint: disable=invalid-name

# This is a HANDLE. CreateFileA is declared with a c_void_p restype, so on
# a 64-bit process a failed call yields the sign-extended pointer value
# 0xFFFFFFFFFFFFFFFF; the previous hard-coded 32-bit 0xFFFFFFFF could never
# compare equal there. Derive the value from a pointer-sized -1 so the
# equality checks below work in both 32- and 64-bit processes.
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value

# CreateFile share mode and disposition
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
OPEN_EXISTING = 0x03

# Status codes
NTSTATUS = ctypes.c_long
HIDP_STATUS_SUCCESS = 0x00110000

# CreateFile Flags
GENERIC_WRITE = 0x40000000
GENERIC_READ = 0x80000000

# SetupDiGetClassDevs flags
DIGCF_DEVICEINTERFACE = 0x10
DIGCF_PRESENT = 0x02
# Function signatures
# hid.dll: attribute queries, preparsed capability data, and
# feature-report I/O used for the OTP protocol.
hid.HidD_GetHidGuid.restype = None
hid.HidD_GetHidGuid.argtypes = [ctypes.POINTER(GUID)]
hid.HidD_GetAttributes.restype = wintypes.BOOLEAN
hid.HidD_GetAttributes.argtypes = [HANDLE, ctypes.POINTER(HidAttributes)]
hid.HidD_GetPreparsedData.restype = wintypes.BOOLEAN
hid.HidD_GetPreparsedData.argtypes = [HANDLE, ctypes.POINTER(PHIDP_PREPARSED_DATA)]
hid.HidD_FreePreparsedData.restype = wintypes.BOOLEAN
hid.HidD_FreePreparsedData.argtypes = [PHIDP_PREPARSED_DATA]
hid.HidD_GetProductString.restype = wintypes.BOOLEAN
hid.HidD_GetProductString.argtypes = [HANDLE, ctypes.c_void_p, ctypes.c_ulong]
hid.HidP_GetCaps.restype = NTSTATUS
hid.HidP_GetCaps.argtypes = [PHIDP_PREPARSED_DATA, ctypes.POINTER(HidCapabilities)]
hid.HidD_GetFeature.restype = wintypes.BOOL
hid.HidD_GetFeature.argtypes = [HANDLE, ctypes.c_void_p, ctypes.c_ulong]
hid.HidD_SetFeature.restype = wintypes.BOOL
hid.HidD_SetFeature.argtypes = [HANDLE, ctypes.c_void_p, ctypes.c_ulong]
# setupapi.dll: device-interface enumeration (ANSI "A" variants, matching
# the byte-string paths used throughout this module).
setupapi.SetupDiGetClassDevsA.argtypes = [
    ctypes.POINTER(GUID),
    ctypes.c_char_p,
    wintypes.HWND,
    wintypes.DWORD,
]
setupapi.SetupDiGetClassDevsA.restype = HDEVINFO
setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL
setupapi.SetupDiEnumDeviceInterfaces.argtypes = [
    HDEVINFO,
    ctypes.c_void_p,
    ctypes.POINTER(GUID),
    wintypes.DWORD,
    ctypes.POINTER(DeviceInterfaceData),
]
setupapi.SetupDiGetDeviceInterfaceDetailA.restype = wintypes.BOOL
setupapi.SetupDiGetDeviceInterfaceDetailA.argtypes = [
    HDEVINFO,
    ctypes.POINTER(DeviceInterfaceData),
    ctypes.POINTER(DeviceInterfaceDetailData),
    wintypes.DWORD,
    ctypes.POINTER(wintypes.DWORD),
    ctypes.c_void_p,
]
setupapi.SetupDiDestroyDeviceInfoList.restype = wintypes.BOOL
setupapi.SetupDiDestroyDeviceInfoList.argtypes = [
    HDEVINFO,
]
# kernel32.dll: open/close the raw device path as a file handle.
kernel32.CreateFileA.restype = HANDLE
kernel32.CreateFileA.argtypes = [
    ctypes.c_char_p,
    wintypes.DWORD,
    wintypes.DWORD,
    ctypes.c_void_p,
    wintypes.DWORD,
    wintypes.DWORD,
    HANDLE,
]
kernel32.CloseHandle.restype = wintypes.BOOL
kernel32.CloseHandle.argtypes = [HANDLE]
class WinHidOtpConnection(OtpConnection):
    """OtpConnection backed by a Windows HID device handle.

    OTP traffic travels in 8-byte HID feature reports; Windows adds a
    leading report-ID byte to every buffer, which this class manages.
    """

    def __init__(self, path):
        # Open write-only: reads happen via HidD_GetFeature, not ReadFile.
        self.handle = kernel32.CreateFileA(
            path,
            GENERIC_WRITE,
            FILE_SHARE_READ | FILE_SHARE_WRITE,
            None,
            OPEN_EXISTING,
            0,
            None,
        )
        if self.handle == INVALID_HANDLE_VALUE:
            raise WinError()

    def close(self):
        # Idempotent: a second close is a no-op.
        handle, self.handle = self.handle, None
        if handle:
            kernel32.CloseHandle(handle)

    def receive(self):
        # 1 report-ID byte + 8 bytes of payload.
        report = ctypes.create_string_buffer(9)
        ok = hid.HidD_GetFeature(self.handle, report, ctypes.sizeof(report))
        if not ok:
            raise WinError()
        # Drop the leading report-ID byte before returning the payload.
        return report.raw[1:]

    def send(self, data):
        # Prepend the report ID (0) expected by HidD_SetFeature.
        report = ctypes.create_string_buffer(b"\0" + bytes(data))
        ok = hid.HidD_SetFeature(self.handle, report, ctypes.sizeof(report))
        if not ok:
            raise WinError()
def get_vid_pid(device):
    """Return the (vendor_id, product_id) pair for an open HID handle.

    Raises OSError (via WinError) if the attribute query fails.
    """
    attrs = HidAttributes()
    if not hid.HidD_GetAttributes(device, ctypes.byref(attrs)):
        raise WinError()
    return attrs.VendorID, attrs.ProductID
def get_usage(device):
    """Return the (usage_page, usage) pair from the device's preparsed
    HID capability data.

    Raises OSError (via WinError) if either HID call fails.
    """
    preparsed = PHIDP_PREPARSED_DATA(0)
    if not hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed)):
        raise WinError()
    try:
        caps = HidCapabilities()
        status = hid.HidP_GetCaps(preparsed, ctypes.byref(caps))
        if status != HIDP_STATUS_SUCCESS:
            raise WinError()
        return caps.UsagePage, caps.Usage
    finally:
        # Always release the buffer allocated by HidD_GetPreparsedData.
        hid.HidD_FreePreparsedData(preparsed)
# Match the Yubico vendor ID, and capture any 4-hex-digit product ID,
# inside a lowercase Windows device-interface path (byte string).
VID_RE = re.compile(rb"\Wvid_%04x\W" % YUBICO_VID)
PID_RE = re.compile(rb"\Wpid_([a-z0-9]{4})\W")
def list_paths():
    """Return a list of (product_id, device_path) tuples for all present
    HID device interfaces whose path matches the Yubico vendor ID.

    Uses the SetupAPI two-call pattern: one call to learn the required
    detail-buffer size, a second call to fill it.
    """
    hid_guid = GUID()
    hid.HidD_GetHidGuid(ctypes.byref(hid_guid))
    # Snapshot the set of currently-present HID device interfaces.
    collection = setupapi.SetupDiGetClassDevsA(
        ctypes.byref(hid_guid), None, None, DIGCF_DEVICEINTERFACE | DIGCF_PRESENT
    )
    try:
        index = 0
        interface_info = DeviceInterfaceData()
        interface_info.cbSize = ctypes.sizeof(DeviceInterfaceData)
        paths = []
        while True:
            result = setupapi.SetupDiEnumDeviceInterfaces(
                collection,
                0,
                ctypes.byref(hid_guid),
                index,
                ctypes.byref(interface_info),
            )
            index += 1
            if not result:
                # Enumeration exhausted (or failed): stop iterating.
                break
            # Sizing call with a NULL buffer: expected to "fail" while
            # reporting the required detail length in detail_len_dw.
            detail_len_dw = wintypes.DWORD()
            result = setupapi.SetupDiGetDeviceInterfaceDetailA(
                collection,
                ctypes.byref(interface_info),
                None,
                0,
                ctypes.byref(detail_len_dw),
                None,
            )
            if result:
                # Success here is unexpected; treat it as an error.
                raise WinError()
            detail_len = detail_len_dw.value
            if detail_len == 0:
                # skip this device, some kind of error
                continue
            # Overlay the variable-length detail struct on a buffer of the
            # reported size; cbSize must be the size of the fixed part only.
            buf = ctypes.create_string_buffer(detail_len)
            interface_detail = DeviceInterfaceDetailData.from_buffer(buf)
            interface_detail.cbSize = ctypes.sizeof(DeviceInterfaceDetailData)
            result = setupapi.SetupDiGetDeviceInterfaceDetailA(
                collection,
                ctypes.byref(interface_info),
                ctypes.byref(interface_detail),
                detail_len,
                None,
                None,
            )
            if not result:
                raise WinError()
            # DevicePath is a NUL-terminated byte string inside the buffer.
            path = ctypes.string_at(ctypes.addressof(interface_detail.DevicePath))
            if VID_RE.search(path):
                pid_match = PID_RE.search(path)
                if pid_match:
                    paths.append((int(pid_match.group(1), 16), path))
        return paths
    finally:
        # Always free the device-information set, even on error.
        setupapi.SetupDiDestroyDeviceInfoList(collection)
def list_devices():
    """Enumerate currently-connected Yubico OTP HID devices.

    Opens each candidate path without read/write access just to query its
    HID usage, and keeps only those reporting the OTP usage.
    """
    found = []
    for pid, path in list_paths():
        handle = kernel32.CreateFileA(
            path,
            0,
            FILE_SHARE_READ | FILE_SHARE_WRITE,
            None,
            OPEN_EXISTING,
            0,
            None,
        )
        if handle == INVALID_HANDLE_VALUE:
            logger.debug("Failed reading HID descriptor: INVALID_HANDLE")
            continue
        try:
            if get_usage(handle) == USAGE_OTP:
                found.append(OtpYubiKeyDevice(path, pid, WinHidOtpConnection))
        except Exception as e:
            # Best-effort: a device we cannot query is simply skipped.
            logger.debug("Failed reading HID descriptor: %s", e)
        finally:
            kernel32.CloseHandle(handle)
    return found
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.secretmanager_v1beta1.types import resources
from google.cloud.secretmanager_v1beta1.types import service
from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
from google.iam.v1 import policy_pb2 as policy # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
from .base import SecretManagerServiceTransport, DEFAULT_CLIENT_INFO
class SecretManagerServiceGrpcTransport(SecretManagerServiceTransport):
    """gRPC backend transport for SecretManagerService.
    Secret Manager Service
    Manages secrets and operations using those secrets. Implements a
    REST model with the following objects:
    - [Secret][google.cloud.secrets.v1beta1.Secret]
    - [SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # Cache of RPC name -> stub callable, populated lazily by the
    # property accessors below.
    _stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "secretmanager.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel supplied: resolve which SSL credentials to use.
            if api_mtls_endpoint:
                # Deprecated mTLS path: the endpoint overrides ``host``.
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                # Current mTLS path: only used when explicit SSL channel
                # credentials were not supplied.
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )
        if not self._grpc_channel:
            # Build the channel using the host/credentials/scopes the base
            # class resolved; message-size limits are lifted (-1).
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "secretmanager.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        # Created (or injected) in __init__ and never rebuilt afterwards.
        return self._grpc_channel
@property
def list_secrets(
self,
) -> Callable[[service.ListSecretsRequest], service.ListSecretsResponse]:
r"""Return a callable for the list secrets method over gRPC.
Lists [Secrets][google.cloud.secrets.v1beta1.Secret].
Returns:
Callable[[~.ListSecretsRequest],
~.ListSecretsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_secrets" not in self._stubs:
self._stubs["list_secrets"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/ListSecrets",
request_serializer=service.ListSecretsRequest.serialize,
response_deserializer=service.ListSecretsResponse.deserialize,
)
return self._stubs["list_secrets"]
@property
def create_secret(
self,
) -> Callable[[service.CreateSecretRequest], resources.Secret]:
r"""Return a callable for the create secret method over gRPC.
Creates a new [Secret][google.cloud.secrets.v1beta1.Secret]
containing no
[SecretVersions][google.cloud.secrets.v1beta1.SecretVersion].
Returns:
Callable[[~.CreateSecretRequest],
~.Secret]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_secret" not in self._stubs:
self._stubs["create_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/CreateSecret",
request_serializer=service.CreateSecretRequest.serialize,
response_deserializer=resources.Secret.deserialize,
)
return self._stubs["create_secret"]
@property
def add_secret_version(
self,
) -> Callable[[service.AddSecretVersionRequest], resources.SecretVersion]:
r"""Return a callable for the add secret version method over gRPC.
Creates a new
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
containing secret data and attaches it to an existing
[Secret][google.cloud.secrets.v1beta1.Secret].
Returns:
Callable[[~.AddSecretVersionRequest],
~.SecretVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "add_secret_version" not in self._stubs:
self._stubs["add_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/AddSecretVersion",
request_serializer=service.AddSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["add_secret_version"]
@property
def get_secret(self) -> Callable[[service.GetSecretRequest], resources.Secret]:
r"""Return a callable for the get secret method over gRPC.
Gets metadata for a given
[Secret][google.cloud.secrets.v1beta1.Secret].
Returns:
Callable[[~.GetSecretRequest],
~.Secret]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_secret" not in self._stubs:
self._stubs["get_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/GetSecret",
request_serializer=service.GetSecretRequest.serialize,
response_deserializer=resources.Secret.deserialize,
)
return self._stubs["get_secret"]
@property
def update_secret(
self,
) -> Callable[[service.UpdateSecretRequest], resources.Secret]:
r"""Return a callable for the update secret method over gRPC.
Updates metadata of an existing
[Secret][google.cloud.secrets.v1beta1.Secret].
Returns:
Callable[[~.UpdateSecretRequest],
~.Secret]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_secret" not in self._stubs:
self._stubs["update_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/UpdateSecret",
request_serializer=service.UpdateSecretRequest.serialize,
response_deserializer=resources.Secret.deserialize,
)
return self._stubs["update_secret"]
@property
def delete_secret(self) -> Callable[[service.DeleteSecretRequest], empty.Empty]:
r"""Return a callable for the delete secret method over gRPC.
Deletes a [Secret][google.cloud.secrets.v1beta1.Secret].
Returns:
Callable[[~.DeleteSecretRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_secret" not in self._stubs:
self._stubs["delete_secret"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/DeleteSecret",
request_serializer=service.DeleteSecretRequest.serialize,
response_deserializer=empty.Empty.FromString,
)
return self._stubs["delete_secret"]
@property
def list_secret_versions(
self,
) -> Callable[
[service.ListSecretVersionsRequest], service.ListSecretVersionsResponse
]:
r"""Return a callable for the list secret versions method over gRPC.
Lists
[SecretVersions][google.cloud.secrets.v1beta1.SecretVersion].
This call does not return secret data.
Returns:
Callable[[~.ListSecretVersionsRequest],
~.ListSecretVersionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_secret_versions" not in self._stubs:
self._stubs["list_secret_versions"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/ListSecretVersions",
request_serializer=service.ListSecretVersionsRequest.serialize,
response_deserializer=service.ListSecretVersionsResponse.deserialize,
)
return self._stubs["list_secret_versions"]
@property
def get_secret_version(
self,
) -> Callable[[service.GetSecretVersionRequest], resources.SecretVersion]:
r"""Return a callable for the get secret version method over gRPC.
Gets metadata for a
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
``projects/*/secrets/*/versions/latest`` is an alias to the
``latest``
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
Returns:
Callable[[~.GetSecretVersionRequest],
~.SecretVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_secret_version" not in self._stubs:
self._stubs["get_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/GetSecretVersion",
request_serializer=service.GetSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["get_secret_version"]
@property
def access_secret_version(
self,
) -> Callable[
[service.AccessSecretVersionRequest], service.AccessSecretVersionResponse
]:
r"""Return a callable for the access secret version method over gRPC.
Accesses a
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
This call returns the secret data.
``projects/*/secrets/*/versions/latest`` is an alias to the
``latest``
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
Returns:
Callable[[~.AccessSecretVersionRequest],
~.AccessSecretVersionResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "access_secret_version" not in self._stubs:
self._stubs["access_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/AccessSecretVersion",
request_serializer=service.AccessSecretVersionRequest.serialize,
response_deserializer=service.AccessSecretVersionResponse.deserialize,
)
return self._stubs["access_secret_version"]
@property
def disable_secret_version(
self,
) -> Callable[[service.DisableSecretVersionRequest], resources.SecretVersion]:
r"""Return a callable for the disable secret version method over gRPC.
Disables a
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
Sets the
[state][google.cloud.secrets.v1beta1.SecretVersion.state] of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion] to
[DISABLED][google.cloud.secrets.v1beta1.SecretVersion.State.DISABLED].
Returns:
Callable[[~.DisableSecretVersionRequest],
~.SecretVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "disable_secret_version" not in self._stubs:
self._stubs["disable_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/DisableSecretVersion",
request_serializer=service.DisableSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["disable_secret_version"]
@property
def enable_secret_version(
self,
) -> Callable[[service.EnableSecretVersionRequest], resources.SecretVersion]:
r"""Return a callable for the enable secret version method over gRPC.
Enables a
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
Sets the
[state][google.cloud.secrets.v1beta1.SecretVersion.state] of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion] to
[ENABLED][google.cloud.secrets.v1beta1.SecretVersion.State.ENABLED].
Returns:
Callable[[~.EnableSecretVersionRequest],
~.SecretVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "enable_secret_version" not in self._stubs:
self._stubs["enable_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/EnableSecretVersion",
request_serializer=service.EnableSecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["enable_secret_version"]
@property
def destroy_secret_version(
self,
) -> Callable[[service.DestroySecretVersionRequest], resources.SecretVersion]:
r"""Return a callable for the destroy secret version method over gRPC.
Destroys a
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
Sets the
[state][google.cloud.secrets.v1beta1.SecretVersion.state] of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion] to
[DESTROYED][google.cloud.secrets.v1beta1.SecretVersion.State.DESTROYED]
and irrevocably destroys the secret data.
Returns:
Callable[[~.DestroySecretVersionRequest],
~.SecretVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "destroy_secret_version" not in self._stubs:
self._stubs["destroy_secret_version"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/DestroySecretVersion",
request_serializer=service.DestroySecretVersionRequest.serialize,
response_deserializer=resources.SecretVersion.deserialize,
)
return self._stubs["destroy_secret_version"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on the specified secret. Replaces
any existing policy.
Permissions on
[SecretVersions][google.cloud.secrets.v1beta1.SecretVersion] are
enforced according to the policy set on the associated
[Secret][google.cloud.secrets.v1beta1.Secret].
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/SetIamPolicy",
request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a secret.
Returns empty policy if the secret exists and does not
have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/GetIamPolicy",
request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that a caller has for the specified secret.
If the secret does not exist, this call returns an empty set of
permissions, not a NOT_FOUND error.
Note: This operation is designed to be used for building
permission-aware UIs and command-line tools, not for
authorization checking. This operation may "fail open" without
warning.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.cloud.secrets.v1beta1.SecretManagerService/TestIamPermissions",
request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
# Explicit public API of this module.
__all__ = ("SecretManagerServiceGrpcTransport",)
| |
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import six
import time
import uuid
from xml.etree import ElementTree as ET
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.huawei import constants
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
# Boolean "smartx" feature switches parsed from volume-type extra specs;
# all features are disabled by default.
opts_capability = {
    'smarttier': False,
    'smartcache': False,
    'smartpartition': False,
    'thin_provisioning_support': False,
    'thick_provisioning_support': False,
}

# Scoped value specs (e.g. "smarttier:policy") that carry a setting for an
# enabled capability; None means "not configured".
opts_value = {
    'policy': None,
    'partitionname': None,
    'cachename': None,
}

# Maps each capability to the value key it is allowed to carry.
opts_associate = {
    'smarttier': 'policy',
    'smartcache': 'cachename',
    'smartpartition': 'partitionname',
}
def get_volume_params(volume):
    """Build the smartx option dict for *volume* from its volume type.

    Falls back to the module defaults when the volume has no type.
    """
    admin_ctxt = context.get_admin_context()
    type_id = volume['volume_type_id']
    if type_id is None:
        # No volume type: return the default capability/value settings.
        defaults = {}
        defaults.update(opts_capability)
        defaults.update(opts_value)
        return defaults
    vol_type = volume_types.get_volume_type(admin_ctxt, type_id)
    specs = dict(vol_type).get('extra_specs')
    return _get_extra_spec_value(specs)
def _get_extra_spec_value(specs):
    """Return the parameters for creating the volume.

    :param specs: the extra_specs dict of a volume type.
    :return: dict of capability/value options derived from *specs*.
    """
    # Fix: the previous revision pre-populated an ``opts`` dict with the
    # defaults and then immediately discarded it by reassignment;
    # _get_opts_from_specs already seeds those defaults itself.
    opts = _get_opts_from_specs(opts_capability, opts_value, specs)
    LOG.debug('get_volume_params opts %(opts)s.', {'opts': opts})
    return opts
def _get_opts_from_specs(opts_capability, opts_value, specs):
opts = {}
opts.update(opts_capability)
opts.update(opts_value)
for key, value in specs.items():
# Get the scope, if is using scope format.
scope = None
key_split = key.split(':')
if len(key_split) > 2 and key_split[0] != "capabilities":
continue
if len(key_split) == 1:
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
if scope:
scope = scope.lower()
if key:
key = key.lower()
if ((not scope or scope == 'capabilities')
and key in opts_capability):
words = value.split()
if not (words and len(words) == 2 and words[0] == '<is>'):
LOG.error(_LE("Extra specs must be specified as "
"capabilities:%s='<is> True' or "
"'<is> true'."), key)
else:
opts[key] = words[1].lower()
if (scope in opts_capability) and (key in opts_value):
if (scope in opts_associate) and (opts_associate[scope] == key):
opts[key] = value
return opts
def _get_smartx_specs_params(lunsetinfo, smartx_opts):
"""Get parameters from config file for creating lun."""
# Default lun set information.
if 'LUNType' in smartx_opts:
lunsetinfo['LUNType'] = smartx_opts['LUNType']
lunsetinfo['policy'] = smartx_opts['policy']
return lunsetinfo
def get_lun_params(xml_file_path, smartx_opts):
    """Build lun creation parameters from the conf file plus smartx opts."""
    defaults = get_lun_conf_params(xml_file_path)
    return _get_smartx_specs_params(defaults, smartx_opts)
def parse_xml_file(xml_file_path):
    """Parse the huawei conf file and return the XML root element."""
    try:
        return ET.parse(xml_file_path).getroot()
    except IOError as err:
        # Log for diagnosis, then let the caller deal with the failure.
        LOG.error(_LE('parse_xml_file: %s.'), err)
        raise
def get_xml_item(xml_root, item):
    """Collect text and attributes of every *item* element under *xml_root*.

    :param xml_root: The root of the xml tree.
    :param item: The tag to look up.
    :return: A list of dicts, one per matching element, each with the
        stripped element text (or None) and its stripped attributes.
    """
    collected = []
    # Use a distinct loop name so the ``item`` argument is not shadowed.
    for node in xml_root.findall(item):
        entry = {'text': None, 'attrib': {}}
        if node.text:
            entry['text'] = node.text.strip()
        for attr_key, attr_val in node.attrib.items():
            if attr_val:
                node.attrib[attr_key] = attr_val.strip()
        entry['attrib'] = node.attrib
        collected.append(entry)
    return collected
def get_conf_host_os_type(host_ip, conf):
    """Get host OS type from xml config file.

    :param host_ip: The IP of the Nova host.
    :param conf: driver configuration carrying the conf file path.
    :return: host OS type constant (defaults to Linux when unmatched).
    """
    root = parse_xml_file(conf.cinder_huawei_conf_file)
    os_conf = {}
    for host in get_xml_item(root, 'Host'):
        os_name = host['attrib']['OSType'].strip()
        os_conf[os_name] = [ip.strip()
                            for ip in host['attrib']['HostIP'].split(',')]
    host_os = None
    # Last matching <Host> entry wins when an IP appears more than once.
    for os_name, ips in os_conf.items():
        if host_ip in ips:
            host_os = constants.OS_TYPE.get(os_name, None)
    if not host_os:
        host_os = constants.OS_TYPE['Linux']  # Default OS type.

    LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.',
              {'ip': host_ip, 'os': host_os})
    return host_os
def get_qos_by_volume_type(volume_type):
    """Extract the Huawei QoS settings attached to *volume_type*.

    Returns an empty dict when the type has no qos_specs association.

    :raises exception.VolumeBackendAPIException: if a numeric QoS value
        is not greater than zero.
    :raises exception.InvalidInput: if IOTYPE is not one of '0', '1', '2'.
    """
    qos = {}
    qos_specs_id = volume_type.get('qos_specs_id')
    # We prefer the qos_specs association
    # and override any existing extra-specs settings
    # if present.
    if qos_specs_id is not None:
        kvs = qos_specs.get_qos_specs(context.get_admin_context(),
                                      qos_specs_id)['specs']
    else:
        return qos
    LOG.info(_LI('The QoS sepcs is: %s.'), kvs)
    for key, value in kvs.items():
        # Only keys the backend understands are kept; others are ignored.
        if key in constants.HUAWEI_VALID_KEYS:
            # Every numeric limit except IOTYPE must be strictly positive.
            if (key.upper() != 'IOTYPE') and (int(value) <= 0):
                err_msg = (_('Qos config is wrong. %(key)s'
                             ' must be set greater than 0.')
                           % {'key': key})
                LOG.error(err_msg)
                raise exception.VolumeBackendAPIException(data=err_msg)
            elif (key.upper() == 'IOTYPE') and (value not in ['0', '1', '2']):
                raise exception.InvalidInput(
                    reason=(_('Illegal value specified for IOTYPE: '
                              'set to either 0, 1, or 2.')))
            else:
                # Keys are normalized to upper case for the backend API.
                qos[key.upper()] = value
    return qos
def get_volume_qos(volume):
    """Look up the Huawei QoS settings for *volume* via its volume type."""
    qos = {}
    admin_ctxt = context.get_admin_context()
    type_id = volume['volume_type_id']
    if type_id is not None:
        vol_type = volume_types.get_volume_type(admin_ctxt, type_id)
        qos = get_qos_by_volume_type(vol_type)
    return qos
def _get_volume_type(type_id):
    """Fetch the volume type record for *type_id* as the admin."""
    admin_ctxt = context.get_admin_context()
    return volume_types.get_volume_type(admin_ctxt, type_id)
def get_lun_conf_params(xml_file_path):
    """Get parameters from config file for creating lun.

    :param xml_file_path: path of the huawei conf file.
    :return: dict of lun creation settings, config overriding defaults.
    :raises exception.VolumeBackendAPIException: on invalid LUNType or
        PrefetchType values.
    """
    # Default lun set information.
    lunsetinfo = {
        'LUNType': 0,
        'StripUnitSize': '64',
        'WriteType': '1',
        'MirrorSwitch': '1',
        'PrefetchType': '3',
        'PrefetchValue': '0',
        'PrefetchTimes': '0',
        'policy': '0',
        'readcachepolicy': '2',
        'writecachepolicy': '5',
    }
    root = parse_xml_file(xml_file_path)

    raw_luntype = root.findtext('LUN/LUNType')
    if raw_luntype:
        # Fix: the previous revision first stored the raw string in
        # 'LUNType' before overwriting it with the numeric code; map
        # straight to the code and strip only once.
        luntype = raw_luntype.strip()
        if luntype == 'Thick':
            lunsetinfo['LUNType'] = 0
        elif luntype == 'Thin':
            lunsetinfo['LUNType'] = 1
        else:
            err_msg = (_(
                "LUNType config is wrong. LUNType must be 'Thin'"
                " or 'Thick'. LUNType: %(fetchtype)s.")
                % {'fetchtype': raw_luntype})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
    else:
        lunsetinfo['LUNType'] = 0

    stripunitsize = root.findtext('LUN/StripUnitSize')
    if stripunitsize is not None:
        lunsetinfo['StripUnitSize'] = stripunitsize.strip()
    writetype = root.findtext('LUN/WriteType')
    if writetype is not None:
        lunsetinfo['WriteType'] = writetype.strip()
    mirrorswitch = root.findtext('LUN/MirrorSwitch')
    if mirrorswitch is not None:
        lunsetinfo['MirrorSwitch'] = mirrorswitch.strip()

    prefetch = root.find('LUN/Prefetch')
    # NOTE(review): a <Prefetch> element without a Type attribute raises
    # KeyError here — pre-existing behavior, kept as-is.
    if prefetch is not None and prefetch.attrib['Type']:
        fetchtype = prefetch.attrib['Type']
        if fetchtype in ['0', '1', '2', '3']:
            lunsetinfo['PrefetchType'] = fetchtype.strip()
            typevalue = prefetch.attrib['Value'].strip()
            if lunsetinfo['PrefetchType'] == '1':
                # Constant prefetch: the array expects the value doubled.
                lunsetinfo['PrefetchValue'] = six.text_type(int(typevalue) * 2)
            elif lunsetinfo['PrefetchType'] == '2':
                lunsetinfo['PrefetchValue'] = typevalue
        else:
            err_msg = (_(
                'PrefetchType config is wrong. PrefetchType'
                ' must be in 0,1,2,3. PrefetchType is: %(fetchtype)s.')
                % {'fetchtype': fetchtype})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
    else:
        LOG.info(_LI(
            'Use default PrefetchType. '
            'PrefetchType: Intelligent.'))

    return lunsetinfo
def find_luntype_in_xml(xml_file_path):
    """Read LUN/LUNType from the conf file and map it to a luntype constant."""
    root = parse_xml_file(xml_file_path)
    luntype = root.findtext('LUN/LUNType')
    if not luntype:
        # Not configured: thick provisioning is the default.
        return constants.THICK_LUNTYPE
    stripped = luntype.strip()
    if stripped == 'Thick':
        return constants.THICK_LUNTYPE
    if stripped == 'Thin':
        return constants.THIN_LUNTYPE
    err_msg = (_(
        "LUNType config is wrong. LUNType must be 'Thin'"
        " or 'Thick'. LUNType: %(fetchtype)s.")
        % {'fetchtype': luntype})
    LOG.error(err_msg)
    raise exception.VolumeBackendAPIException(data=err_msg)
def encode_name(name):
    """Compress a volume UUID into a short URL-safe backend identifier."""
    hex_str = name.replace("-", "")
    vol_uuid = uuid.UUID('urn:uuid:%s' % hex_str)
    # Decode so the result is text on both py2 and py3.
    encoded = base64.urlsafe_b64encode(vol_uuid.bytes).decode("utf-8")
    # The fixed-length padding is redundant; drop it to shorten the name.
    return encoded.replace("=", "")
def init_lun_parameters(name, parameters):
    """Initialize basic LUN parameters.

    :param name: backend LUN name.
    :param parameters: dict produced by get_lun_params()/get_volume_size().
    :return: request body for the LUN creation REST call.
    """
    return {
        "TYPE": "11",
        "NAME": name,
        "PARENTTYPE": "216",
        "PARENTID": parameters['pool_id'],
        "DESCRIPTION": parameters['volume_description'],
        "ALLOCTYPE": parameters['LUNType'],
        "CAPACITY": parameters['volume_size'],
        "WRITEPOLICY": parameters['WriteType'],
        "MIRRORPOLICY": parameters['MirrorSwitch'],
        "PREFETCHPOLICY": parameters['PrefetchType'],
        "PREFETCHVALUE": parameters['PrefetchValue'],
        "DATATRANSFERPOLICY": parameters['policy'],
        "READCACHEPOLICY": parameters['readcachepolicy'],
        "WRITECACHEPOLICY": parameters['writecachepolicy'],
    }
def volume_in_use(volume):
    """Check if the given volume is in use.

    Fix: always return a real bool. The previous version evaluated
    ``attachments and len(attachments) > 0`` and therefore leaked the raw
    falsy value (None or []) to callers instead of False.
    """
    return bool(volume['volume_attachment'])
def get_wait_interval(xml_file_path, event_type):
    """Get wait interval from huawei conf file."""
    root = parse_xml_file(xml_file_path)
    configured = root.findtext('LUN/%s' % event_type)
    if configured is None:
        # Fall back to the driver-wide default and note it in the log.
        configured = constants.DEFAULT_WAIT_INTERVAL
        LOG.info(_LI(
            "Wait interval for %(event_type)s is not configured in huawei "
            "conf file. Use default: %(default_wait_interval)d."),
            {"event_type": event_type,
             "default_wait_interval": configured})
    return int(configured)
def get_default_timeout(xml_file_path):
    """Get timeout (seconds) from huawei conf file.

    :return: timeout as an int.
    """
    root = parse_xml_file(xml_file_path)
    timeout = root.findtext('LUN/Timeout')
    if timeout is None:
        timeout = constants.DEFAULT_WAIT_TIMEOUT
        LOG.info(_LI(
            "Timeout is not configured in huawei conf file. "
            "Use default: %(default_timeout)d."),
            {"default_timeout": timeout})
    # Fix: findtext() returns a string; wait_for_condition() compares this
    # value numerically, so normalize to int (matching get_wait_interval).
    return int(timeout)
def wait_for_condition(xml_file_path, func, interval, timeout=None):
    """Poll *func* every *interval* seconds until it returns truthy.

    :param xml_file_path: huawei conf file, used only to look up the
        default timeout when *timeout* is None.
    :param func: zero-argument callable; a truthy return stops polling.
    :param interval: polling interval in seconds.
    :param timeout: overall timeout in seconds.
    :raises exception.VolumeBackendAPIException: if *func* raises or the
        timeout expires.
    """
    start_time = time.time()
    if timeout is None:
        timeout = get_default_timeout(xml_file_path)

    def _inner():
        try:
            res = func()
        except Exception as ex:
            raise exception.VolumeBackendAPIException(data=ex)
        if res:
            # Truthy result: signal the FixedIntervalLoopingCall to stop.
            raise loopingcall.LoopingCallDone()

        if int(time.time()) - start_time > timeout:
            msg = (_('wait_for_condition: %s timed out.')
                   % func.__name__)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

    timer = loopingcall.FixedIntervalLoopingCall(_inner)
    # wait() blocks the caller until _inner stops the loop or raises.
    timer.start(interval=interval).wait()
def get_login_info(xml_file_path):
    """Get login IP, user name and password from config file."""
    root = parse_xml_file(xml_file_path)
    login_info = {
        'RestURL': root.findtext('Storage/RestURL').strip(),
    }
    # Credentials are taken verbatim (no stripping) from the conf file.
    for key in ('UserName', 'UserPassword'):
        login_info[key] = root.find('Storage/%s' % key).text
    return login_info
def _change_file_mode(filepath):
    """Restrict *filepath* to mode 640 (owner rw, group r) via rootwrap."""
    utils.execute('chmod', '640', filepath, run_as_root=True)
def get_iscsi_conf(xml_file_path):
    """Get iSCSI info from config file."""
    root = parse_xml_file(xml_file_path)
    iscsiinfo = {
        'DefaultTargetIP': root.findtext('iSCSI/DefaultTargetIP').strip(),
    }
    initiators = []
    for node in root.findall('iSCSI/Initiator'):
        # Strip whitespace from every attribute value of the element.
        initiators.append({attr: val.strip() for attr, val in node.items()})
    iscsiinfo['Initiator'] = initiators
    return iscsiinfo
def check_qos_high_priority(qos):
    """Check QoS priority.

    MIN* (floor) and LATENCY keys mark a high-priority QoS policy.
    """
    return any(key.startswith(('MIN', 'LATENCY')) for key in qos)
def check_conf_file(xml_file_path):
    """Check the config file, make sure the essential items are set."""
    root = parse_xml_file(xml_file_path)
    resturl = root.findtext('Storage/RestURL')
    username = root.findtext('Storage/UserName')
    pwd = root.findtext('Storage/UserPassword')
    pool_node = root.findall('LUN/StoragePool')

    if not (resturl and username and pwd):
        err_msg = (_(
            'check_conf_file: Config file invalid. RestURL,'
            ' UserName and UserPassword must be set.'))
        LOG.error(err_msg)
        raise exception.InvalidInput(reason=err_msg)

    if not pool_node:
        err_msg = (_(
            'check_conf_file: Config file invalid. '
            'StoragePool must be set.'))
        LOG.error(err_msg)
        raise exception.InvalidInput(reason=err_msg)
def get_volume_size(volume):
    """Calculate the volume size.

    We should divide the given volume size by 512 for the 18000 system
    calculates volume size with sectors, which is 512 bytes.
    """
    size_gb = int(volume['size'])
    if size_gb == 0:
        # An unset size is treated as 1 GB worth of sectors.
        return units.Gi / 512
    return size_gb * units.Gi / 512
def get_protocol(xml_file_path):
    """Get protocol from huawei conf file."""
    root = parse_xml_file(xml_file_path)
    protocol = root.findtext('Storage/Protocol')
    if protocol:
        return protocol
    err_msg = (_('Get protocol from huawei conf file error.'))
    LOG.error(err_msg)
    raise exception.InvalidInput(reason=err_msg)
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains routines for printing protocol messages in text format."""
import cStringIO
import re
from google.net.proto2.python.internal import type_checkers
from google.net.proto2.python.public import descriptor
from google.net.proto2.python.public import text_encoding
# Public API of this text-format module.
__all__ = ['MessageToString', 'PrintMessage', 'PrintField',
           'PrintFieldValue', 'Merge']

# Value-range checkers indexed as 2 * is_long + is_signed (see ParseInteger).
_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(),
                     type_checkers.Int32ValueChecker(),
                     type_checkers.Uint64ValueChecker(),
                     type_checkers.Int64ValueChecker())

# Textual float spellings accepted by ParseFloat beyond what float() takes.
_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE)

# C++ types that honor the float_format option when printing.
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
                          descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
# Root of this module's exception hierarchy.
class Error(Exception):
  """Top-level module error for text_format."""


class ParseError(Error):
  """Thrown in case of ASCII parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False,
                    pointy_brackets=False, use_index_order=False,
                    float_format=None):
  """Convert protobuf message to text format.

  Floating point values can be formatted compactly with 15 digits of
  precision (which is the most that IEEE 754 "double" can guarantee)
  using float_format='.15g'.

  Args:
    message: The protocol buffers message.
    as_utf8: Produce text output in UTF8 format.
    as_one_line: Don't introduce newlines between fields.
    pointy_brackets: If True, use angle brackets instead of curly braces for
      nesting.
    use_index_order: If True, print fields of a proto message using the order
      defined in source code instead of the field number. By default, use the
      field number order.
    float_format: If set, use this to specify floating point number formatting
      (per the "Format Specification Mini-Language"); otherwise, str() is used.

  Returns:
    A string of the text formatted protocol buffer message.
  """
  # Render into an in-memory buffer (Python 2 cStringIO) via PrintMessage.
  out = cStringIO.StringIO()
  PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line,
               pointy_brackets=pointy_brackets,
               use_index_order=use_index_order,
               float_format=float_format)
  result = out.getvalue()
  out.close()
  if as_one_line:
    # Single-line output ends with a trailing separator space; drop it.
    return result.rstrip()
  return result
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False,
                 pointy_brackets=False, use_index_order=False,
                 float_format=None):
  """Write a text-format rendering of every set field of *message* to *out*."""
  fields = message.ListFields()
  if use_index_order:
    # Order by declaration position in the .proto file, not field number.
    fields.sort(key=lambda entry: entry[0].index)
  for field, value in fields:
    # Repeated fields yield one printed line per element.
    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      elements = value
    else:
      elements = [value]
    for element in elements:
      PrintField(field, element, out, indent, as_utf8, as_one_line,
                 pointy_brackets=pointy_brackets,
                 use_index_order=use_index_order,
                 float_format=float_format)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False,
               pointy_brackets=False, use_index_order=False, float_format=None):
  """Print a single field name/value pair. For repeated fields, the value
  should be a single element."""

  out.write(' ' * indent)
  if field.is_extension:
    out.write('[')
    # MessageSet extensions are printed as [FullMessageName] rather than
    # the extension field's own full name.
    if (field.containing_type.GetOptions().message_set_wire_format and
        field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
        field.message_type == field.extension_scope and
        field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
      out.write(field.message_type.full_name)
    else:
      out.write(field.full_name)
    out.write(']')
  elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
    # For groups, use the capitalized message type name, not the field name.
    out.write(field.message_type.name)
  else:
    out.write(field.name)

  if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # Scalars render as "name: value"; message fields omit the colon
    # ("name { ... }").
    out.write(': ')

  PrintFieldValue(field, value, out, indent, as_utf8, as_one_line,
                  pointy_brackets=pointy_brackets,
                  use_index_order=use_index_order,
                  float_format=float_format)
  if as_one_line:
    out.write(' ')
  else:
    out.write('\n')
def PrintFieldValue(field, value, out, indent=0, as_utf8=False,
                    as_one_line=False, pointy_brackets=False,
                    use_index_order=False,
                    float_format=None):
  """Print a single field value (not including name). For repeated fields,
  the value should be a single element."""

  if pointy_brackets:
    openb = '<'
    closeb = '>'
  else:
    openb = '{'
    closeb = '}'

  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    if as_one_line:
      out.write(' %s ' % openb)
      PrintMessage(value, out, indent, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   use_index_order=use_index_order,
                   float_format=float_format)
      out.write(closeb)
    else:
      # Multi-line form: nested fields are indented two extra spaces and
      # the closing bracket returns to this field's indent level.
      out.write(' %s\n' % openb)
      PrintMessage(value, out, indent + 2, as_utf8, as_one_line,
                   pointy_brackets=pointy_brackets,
                   use_index_order=use_index_order,
                   float_format=float_format)
      out.write(' ' * indent + closeb)
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
    enum_value = field.enum_type.values_by_number.get(value, None)
    if enum_value is not None:
      out.write(enum_value.name)
    else:
      # Unknown enum number (e.g. produced by a newer schema): print raw.
      out.write(str(value))
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
    out.write('\"')
    if isinstance(value, unicode):  # Python 2: encode unicode to UTF-8 bytes.
      out_value = value.encode('utf-8')
    else:
      out_value = value
    if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
      # Bytes fields are always escaped byte-by-byte, never emitted as UTF-8.
      out_as_utf8 = False
    else:
      out_as_utf8 = as_utf8
    out.write(text_encoding.CEscape(out_value, out_as_utf8))
    out.write('\"')
  elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
    if value:
      out.write('true')
    else:
      out.write('false')
  elif field.cpp_type in _FLOAT_TYPES and float_format is not None:
    out.write('{1:{0}}'.format(float_format, value))
  else:
    out.write(str(value))
def Parse(text, message):
  """Parses an ASCII representation of a protocol message into a message.

  Args:
    text: Message ASCII representation.
    message: A protocol buffer message to merge into.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On ASCII parsing problems.
  """
  # NOTE(review): Python 2 semantics — input that is not str is decoded
  # from UTF-8 before being split into lines.
  if not isinstance(text, str): text = text.decode('utf-8')
  return ParseLines(text.split('\n'), message)
def Merge(text, message):
  """Parses an ASCII representation of a protocol message into a message.

  Like Parse(), but repeated occurrences of a non-repeated field are
  tolerated and the last value wins.

  Args:
    text: Message ASCII representation.
    message: A protocol buffer message to merge into.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On ASCII parsing problems.
  """
  lines = text.split('\n')
  return MergeLines(lines, message)
def ParseLines(lines, message):
  """Parses an ASCII representation of a protocol message into a message.

  Strict variant: duplicate values for a non-repeated field are an error.

  Args:
    lines: An iterable of lines of a message's ASCII representation.
    message: A protocol buffer message to merge into.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On ASCII parsing problems.
  """
  _ParseOrMerge(lines, message, allow_multiple_scalars=False)
  return message
def MergeLines(lines, message):
  """Parses an ASCII representation of a protocol message into a message.

  Lenient variant: duplicate values for a non-repeated field are allowed.

  Args:
    lines: An iterable of lines of a message's ASCII representation.
    message: A protocol buffer message to merge into.

  Returns:
    The same message passed as argument.

  Raises:
    ParseError: On ASCII parsing problems.
  """
  _ParseOrMerge(lines, message, allow_multiple_scalars=True)
  return message
def _ParseOrMerge(lines, message, allow_multiple_scalars):
  """Shared driver behind ParseLines and MergeLines.

  Args:
    lines: Lines of a message's ASCII representation.
    message: A protocol buffer message to merge into.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".

  Raises:
    ParseError: On ASCII parsing problems.
  """
  tok = _Tokenizer(lines)
  # Consume top-level fields until the tokenizer reports end of input.
  while not tok.AtEnd():
    _MergeField(tok, message, allow_multiple_scalars)
def _MergeField(tokenizer, message, allow_multiple_scalars):
  """Merges a single protocol message field into a message.

  Args:
    tokenizer: A tokenizer to parse the field name and values.
    message: A protocol message to record the data.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".

  Raises:
    ParseError: In case of ASCII parsing problems.
  """
  message_descriptor = message.DESCRIPTOR
  if tokenizer.TryConsume('['):
    # Extension field: "[qualified.extension.name]".
    name = [tokenizer.ConsumeIdentifier()]
    while tokenizer.TryConsume('.'):
      name.append(tokenizer.ConsumeIdentifier())
    name = '.'.join(name)

    if not message_descriptor.is_extendable:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" does not have extensions.' %
          message_descriptor.full_name)
    field = message.Extensions._FindExtensionByName(name)
    if not field:
      raise tokenizer.ParseErrorPreviousToken(
          'Extension "%s" not registered.' % name)
    elif message_descriptor != field.containing_type:
      raise tokenizer.ParseErrorPreviousToken(
          'Extension "%s" does not extend message type "%s".' % (
              name, message_descriptor.full_name))
    tokenizer.Consume(']')
  else:
    name = tokenizer.ConsumeIdentifier()
    field = message_descriptor.fields_by_name.get(name, None)

    # Groups are identified by their capitalized type name in text format,
    # while the descriptor stores the lowercased field name.
    if not field:
      field = message_descriptor.fields_by_name.get(name.lower(), None)
      if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
        field = None

    if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
        field.message_type.name != name):
      field = None

    if not field:
      raise tokenizer.ParseErrorPreviousToken(
          'Message type "%s" has no field named "%s".' % (
              message_descriptor.full_name, name))

  if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
    # Sub-message: optional ':' then '{...}' or '<...>', parsed recursively.
    tokenizer.TryConsume(':')

    if tokenizer.TryConsume('<'):
      end_token = '>'
    else:
      tokenizer.Consume('{')
      end_token = '}'

    if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
      if field.is_extension:
        sub_message = message.Extensions[field].add()
      else:
        sub_message = getattr(message, field.name).add()
    else:
      if field.is_extension:
        sub_message = message.Extensions[field]
      else:
        sub_message = getattr(message, field.name)
      # Mark the singular sub-message as present even if it stays empty.
      sub_message.SetInParent()

    while not tokenizer.TryConsume(end_token):
      if tokenizer.AtEnd():
        raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
      _MergeField(tokenizer, sub_message, allow_multiple_scalars)
  else:
    _MergeScalarField(tokenizer, message, field, allow_multiple_scalars)

  # Optional field separator after each field.
  if not tokenizer.TryConsume(','):
    tokenizer.TryConsume(';')
def _MergeScalarField(tokenizer, message, field, allow_multiple_scalars):
  """Merges a single protocol message scalar field into a message.

  Args:
    tokenizer: A tokenizer to parse the field value.
    message: A protocol message to record the data.
    field: The descriptor of the field to be merged.
    allow_multiple_scalars: Determines if repeated values for a non-repeated
      field are permitted, e.g., the string "foo: 1 foo: 2" for a
      required/optional field named "foo".

  Raises:
    ParseError: In case of ASCII parsing problems.
    RuntimeError: On runtime errors.
  """
  tokenizer.Consume(':')
  value = None

  # Dispatch on the declared wire type to the matching token consumer.
  if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
                    descriptor.FieldDescriptor.TYPE_SINT32,
                    descriptor.FieldDescriptor.TYPE_SFIXED32):
    value = tokenizer.ConsumeInt32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
                      descriptor.FieldDescriptor.TYPE_SINT64,
                      descriptor.FieldDescriptor.TYPE_SFIXED64):
    value = tokenizer.ConsumeInt64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
                      descriptor.FieldDescriptor.TYPE_FIXED32):
    value = tokenizer.ConsumeUint32()
  elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
                      descriptor.FieldDescriptor.TYPE_FIXED64):
    value = tokenizer.ConsumeUint64()
  elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
                      descriptor.FieldDescriptor.TYPE_DOUBLE):
    value = tokenizer.ConsumeFloat()
  elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
    value = tokenizer.ConsumeBool()
  elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
    value = tokenizer.ConsumeString()
  elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
    value = tokenizer.ConsumeByteString()
  elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
    value = tokenizer.ConsumeEnum(field)
  else:
    raise RuntimeError('Unknown field type %d' % field.type)

  if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
    if field.is_extension:
      message.Extensions[field].append(value)
    else:
      getattr(message, field.name).append(value)
  else:
    # Singular field: reject duplicates unless merging semantics apply.
    if field.is_extension:
      if not allow_multiple_scalars and message.HasExtension(field):
        raise tokenizer.ParseErrorPreviousToken(
            'Message type "%s" should not have multiple "%s" extensions.' %
            (message.DESCRIPTOR.full_name, field.full_name))
      else:
        message.Extensions[field] = value
    else:
      if not allow_multiple_scalars and message.HasField(field.name):
        raise tokenizer.ParseErrorPreviousToken(
            'Message type "%s" should not have multiple "%s" fields.' %
            (message.DESCRIPTOR.full_name, field.name))
      else:
        setattr(message, field.name, value)
class _Tokenizer(object):
  """Protocol buffer ASCII representation tokenizer.

  This class handles the lower level string parsing by splitting it into
  meaningful tokens.

  It was directly ported from the Java protocol buffer API.
  """

  # Runs of whitespace and '#'-comments, skipped between tokens.
  _WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
  # One token: identifier, number, or a (possibly unterminated) quoted string.
  _TOKEN = re.compile(
      '[a-zA-Z_][0-9a-zA-Z_+-]*|'
      '[0-9+-][0-9a-zA-Z_.+-]*|'
      '\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|'
      '\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)')
  _IDENTIFIER = re.compile(r'\w+')

  def __init__(self, lines):
    # NOTE: Python 2 code — uses lines.next() and str/unicode semantics.
    self._position = 0
    self._line = -1
    self._column = 0
    self._token_start = None
    self.token = ''
    self._lines = iter(lines)
    self._current_line = ''
    self._previous_line = 0
    self._previous_column = 0
    self._more_lines = True
    self._SkipWhitespace()
    self.NextToken()

  def AtEnd(self):
    """Checks the end of the text was reached.

    Returns:
      True iff the end was reached.
    """
    # NextToken() sets token to '' once input is exhausted.
    return not self.token

  def _PopLine(self):
    # Advance to the next input line whenever the column runs past the
    # current one; clears _more_lines when the iterator is exhausted.
    while len(self._current_line) <= self._column:
      try:
        self._current_line = self._lines.next()
      except StopIteration:
        self._current_line = ''
        self._more_lines = False
        return
      else:
        self._line += 1
        self._column = 0

  def _SkipWhitespace(self):
    # Skip whitespace and comments, popping lines as needed.
    while True:
      self._PopLine()
      match = self._WHITESPACE.match(self._current_line, self._column)
      if not match:
        break
      length = len(match.group(0))
      self._column += length

  def TryConsume(self, token):
    """Tries to consume a given piece of text.

    Args:
      token: Text to consume.

    Returns:
      True iff the text was consumed.
    """
    if self.token == token:
      self.NextToken()
      return True
    return False

  def Consume(self, token):
    """Consumes a piece of text.

    Args:
      token: Text to consume.

    Raises:
      ParseError: If the text couldn't be consumed.
    """
    if not self.TryConsume(token):
      raise self._ParseError('Expected "%s".' % token)

  def ConsumeIdentifier(self):
    """Consumes protocol message field identifier.

    Returns:
      Identifier string.

    Raises:
      ParseError: If an identifier couldn't be consumed.
    """
    result = self.token
    if not self._IDENTIFIER.match(result):
      raise self._ParseError('Expected identifier.')
    self.NextToken()
    return result

  def ConsumeInt32(self):
    """Consumes a signed 32bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If a signed 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=False)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeUint32(self):
    """Consumes an unsigned 32bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If an unsigned 32bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=False)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeInt64(self):
    """Consumes a signed 64bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If a signed 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=True, is_long=True)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeUint64(self):
    """Consumes an unsigned 64bit integer number.

    Returns:
      The integer parsed.

    Raises:
      ParseError: If an unsigned 64bit integer couldn't be consumed.
    """
    try:
      result = ParseInteger(self.token, is_signed=False, is_long=True)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeFloat(self):
    """Consumes an floating point number.

    Returns:
      The number parsed.

    Raises:
      ParseError: If a floating point number couldn't be consumed.
    """
    try:
      result = ParseFloat(self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeBool(self):
    """Consumes a boolean value.

    Returns:
      The bool parsed.

    Raises:
      ParseError: If a boolean value couldn't be consumed.
    """
    try:
      result = ParseBool(self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeString(self):
    """Consumes a string value.

    Returns:
      The string parsed.

    Raises:
      ParseError: If a string value couldn't be consumed.
    """
    the_bytes = self.ConsumeByteString()
    try:
      # Python 2: promote the raw bytes to a unicode string.
      return unicode(the_bytes, 'utf-8')
    except UnicodeDecodeError, e:
      raise self._StringParseError(e)

  def ConsumeByteString(self):
    """Consumes a byte array value.

    Returns:
      The array parsed (as a string).

    Raises:
      ParseError: If a byte array value couldn't be consumed.
    """
    # Adjacent string literals are concatenated, as in C and Python source.
    the_list = [self._ConsumeSingleByteString()]
    while self.token and self.token[0] in ('\'', '"'):
      the_list.append(self._ConsumeSingleByteString())
    return ''.encode('latin1').join(the_list)

  def _ConsumeSingleByteString(self):
    """Consume one token of a string literal.

    String literals (whether bytes or text) can come in multiple adjacent
    tokens which are automatically concatenated, like in C or Python.  This
    method only consumes one token.
    """
    text = self.token
    if len(text) < 1 or text[0] not in ('\'', '"'):
      raise self._ParseError('Expected string but found: "%r"' % text)

    if len(text) < 2 or text[-1] != text[0]:
      raise self._ParseError('String missing ending quote.')

    try:
      result = text_encoding.CUnescape(text[1:-1])
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ConsumeEnum(self, field):
    # Consume an enum value by name or number for the given field.
    try:
      result = ParseEnum(field, self.token)
    except ValueError, e:
      raise self._ParseError(str(e))
    self.NextToken()
    return result

  def ParseErrorPreviousToken(self, message):
    """Creates and *returns* a ParseError for the previously read token.

    Args:
      message: A message to set for the exception.

    Returns:
      A ParseError instance.
    """
    return ParseError('%d:%d : %s' % (
        self._previous_line + 1, self._previous_column + 1, message))

  def _ParseError(self, message):
    """Creates and *returns* a ParseError for the current token."""
    return ParseError('%d:%d : %s' % (
        self._line + 1, self._column + 1, message))

  def _StringParseError(self, e):
    return self._ParseError('Couldn\'t parse string: ' + str(e))

  def NextToken(self):
    """Reads the next meaningful token."""
    # Remember where the current token ended for error reporting.
    self._previous_line = self._line
    self._previous_column = self._column

    self._column += len(self.token)
    self._SkipWhitespace()

    if not self._more_lines:
      self.token = ''
      return

    match = self._TOKEN.match(self._current_line, self._column)
    if match:
      token = match.group(0)
      self.token = token
    else:
      # No regex match: fall back to a single-character token (punctuation).
      self.token = self._current_line[self._column]
def ParseInteger(text, is_signed=False, is_long=False):
  """Parses an integer.

  Args:
    text: The text to parse.
    is_signed: True if a signed integer must be parsed.
    is_long: True if a long integer must be parsed.

  Returns:
    The integer value.

  Raises:
    ValueError: Thrown Iff the text is not a valid integer.
  """
  try:
    # Base 0 lets int()/long() auto-detect decimal, octal and hex prefixes.
    result = long(text, 0) if is_long else int(text, 0)
  except ValueError:
    raise ValueError('Couldn\'t parse integer: %s' % text)
  # Range-check against the width/signedness implied by the flags.
  checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
  checker.CheckValue(result)
  return result
def ParseFloat(text):
  """Parse a floating point number.

  Args:
    text: Text to parse.

  Returns:
    The number parsed.

  Raises:
    ValueError: If a floating point number couldn't be parsed.
  """
  try:
    return float(text)
  except ValueError:
    pass
  # float() rejected the text; try the protobuf-specific spellings.
  if _FLOAT_INFINITY.match(text):
    return float('-inf') if text[0] == '-' else float('inf')
  if _FLOAT_NAN.match(text):
    return float('nan')
  # Assume the spelling has a C-style 'f' suffix (e.g. "1.5f").
  try:
    return float(text.rstrip('f'))
  except ValueError:
    raise ValueError('Couldn\'t parse float: %s' % text)
def ParseBool(text):
  """Parse a boolean value.

  Args:
    text: Text to parse.

  Returns:
    Boolean values parsed

  Raises:
    ValueError: If text is not a valid boolean.
  """
  # Only the exact lowercase spellings are accepted.
  if text in ('true', 't', '1'):
    return True
  if text in ('false', 'f', '0'):
    return False
  raise ValueError('Expected "true" or "false".')
def ParseEnum(field, value):
  """Parse an enum value.

  The value can be specified by a number (the enum value), or by
  a string literal (the enum name).

  Args:
    field: Enum field descriptor.
    value: String value.

  Returns:
    Enum value number.

  Raises:
    ValueError: If the enum value could not be parsed.
  """
  enum_descriptor = field.enum_type
  try:
    number = int(value, 0)
  except ValueError:
    # Not numeric; look the value up by name instead.
    enum_value = enum_descriptor.values_by_name.get(value, None)
    if enum_value is None:
      raise ValueError(
          'Enum type "%s" has no value named %s.' % (
              enum_descriptor.full_name, value))
    return enum_value.number
  enum_value = enum_descriptor.values_by_number.get(number, None)
  if enum_value is None:
    raise ValueError(
        'Enum type "%s" has no value with number %d.' % (
            enum_descriptor.full_name, number))
  return enum_value.number
| |
#!/usr/bin/python2.4
# Copyright 2009 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defer tool for SCons."""
from __future__ import print_function
import os
import sys
import types
import SCons.Errors
# Current group name being executed by ExecuteDefer(). Set to None outside
# of ExecuteDefer().
_execute_defer_context = None
class DeferGroup:
  """Named list of functions to be deferred."""
  # Deliberately NOT derived from object: if it were, instances would report
  # type <class 'defer.DeferGroup'>, which prevents SCons.Util.semi_deepcopy()
  # from calling its __semi_deepcopy__ function.
  # TODO: Make semi_deepcopy() capable of handling classes derived from
  # object.
  def __init__(self):
    """Initialize deferred function object."""
    # Each entry is a (function, environment, cwd) tuple.
    self.func_env_cwd = []
    # Names of other defer groups that must run before this one.
    self.after = set()
  def __semi_deepcopy__(self):
    """Makes a semi-deep-copy of this object.

    Returns:
      A semi-deep-copy of this object.

    The contained list and set are copied, but the function pointers and
    environments they reference are shared with the original.  Needed so
    env.Clone() makes a copy of the defer list, so that functions and
    after-relationships subsequently added to the clone are not added to
    the parent.
    """
    dup = DeferGroup()
    dup.func_env_cwd = list(self.func_env_cwd)
    dup.after = set(self.after)
    return dup
def SetDeferRoot(self):
  """Sets the current environment as the root environment for defer.

  Args:
    self: Current environment context.

  Functions deferred by environments cloned from the root environment (that
  is, functions deferred by children of the root environment) will be
  executed when ExecuteDefer() is called from the root environment.

  Functions deferred by environments from which the root environment was
  cloned (that is, functions deferred by parents of the root environment)
  will be passed the root environment instead of the original parent
  environment.  (Otherwise, they would have no way to determine the root
  environment.)
  """
  # Mark this environment as the holder of all defer groups.
  self['_DEFER_ROOT_ENV'] = self
  # Re-bind functions inherited from parent environments so they are passed
  # the new root environment when executed.
  for group in GetDeferGroups(self).values():
    group.func_env_cwd = [(func, self, cwd)
                          for (func, env, cwd) in group.func_env_cwd]
def GetDeferRoot(self):
  """Returns the root environment for defer.

  Args:
    self: Current environment context.

  Returns:
    The environment that called SetDeferRoot(), if one of this environment's
    ancestors did so; otherwise, this environment itself.
  """
  return self.get('_DEFER_ROOT_ENV', self)
def GetDeferGroups(env):
  """Returns the dict of defer groups from the root defer environment.

  Args:
    env: Environment context.

  Returns:
    The dict of defer groups stored on the root defer environment.
  """
  root = env.GetDeferRoot()
  return root['_DEFER_GROUPS']
def ExecuteDefer(self):
  """Executes deferred functions.

  Repeatedly runs any defer group whose 'after' dependencies have all been
  satisfied, until no groups remain.  Raises SCons.Errors.UserError on a
  re-entrant call or when the remaining groups form a dependency cycle.

  Args:
    self: Current environment context.
  """
  # Check for re-entrancy
  global _execute_defer_context
  if _execute_defer_context:
    raise SCons.Errors.UserError('Re-entrant call to ExecuteDefer().')
  # Save directory, so SConscript functions can occur in the right subdirs
  oldcwd = os.getcwd()
  # If defer root is set and isn't this environment, we're being called from a
  # sub-environment. That's not where we should be called.
  if self.GetDeferRoot() != self:
    print('Warning: Ignoring call to ExecuteDefer() from child of the '
          'environment passed to SetDeferRoot().')
    return
  # Get list of defer groups from ourselves.
  defer_groups = GetDeferGroups(self)
  # Loop through deferred functions
  try:
    while defer_groups:
      did_work = False
      # NOTE(review): defer_groups is mutated (del below) while iterating
      # items(); this relies on Python 2's items() returning a list snapshot.
      for name, group in defer_groups.items():
        if group.after.intersection(defer_groups.keys()):
          continue # Still have dependencies
        # Set defer context
        _execute_defer_context = name
        # Remove this group from the list of defer groups now, in case one of
        # the functions it calls adds back a function into that defer group.
        del defer_groups[name]
        if group.func_env_cwd:
          # Run all the functions in our named group
          for func, env, cwd in group.func_env_cwd:
            os.chdir(cwd)
            func(env)
        # The defer groups have been altered, so restart the search for
        # functions that can be executed.
        did_work = True
        break
      if not did_work:
        # Every remaining group waits on another remaining group: a cycle.
        errmsg = 'Error in ExecuteDefer: dependency cycle detected.\n'
        for name, group in defer_groups.items():
          errmsg += ' %s after: %s\n' % (name, group.after)
        raise SCons.Errors.UserError(errmsg)
  finally:
    # No longer in a defer context
    _execute_defer_context = None
    # Restore directory
    os.chdir(oldcwd)
def PrintDefer(self, print_functions=True):
  """Prints the current defer dependency graph.

  Args:
    self: Environment in which PrintDefer() was called.
    print_functions: Print individual functions in defer groups.
  """
  # Get list of defer groups from the root defer environment.
  defer_groups = GetDeferGroups(self)
  # sorted() copies the keys into a new list, so this also works on
  # Python 3, where dict.keys() returns a view without a sort() method.
  for k in sorted(defer_groups.keys()):
    print(' +- %s' % k)
    group = defer_groups[k]
    after = sorted(group.after)
    if after:
      print(' | after')
      for a in after:
        print(' | +- %s' % a)
    if print_functions and group.func_env_cwd:
      print(' functions')
      for func, env, cwd in group.func_env_cwd:
        print(' | +- %s %s' % (func.__name__, cwd))
def Defer(self, *args, **kwargs):
  """Adds a deferred function or modifies defer dependencies.

  Args:
    self: Environment in which Defer() was called
    args: Positional arguments
    kwargs: Named arguments

  The deferred function will be passed the environment used to call Defer(),
  and will be executed in the same working directory as the calling SConscript.
  (Exception: if this environment is cloned and the clone calls SetDeferRoot()
  and then ExecuteDefer(), the function will be passed the root environment,
  instead of the environment used to call Defer().)

  All deferred functions run after all SConscripts. Additional dependencies
  may be specified with the after= keyword.

  Usage:
    env.Defer(func)
      # Defer func() until after all SConscripts
    env.Defer(func, after=otherfunc)
      # Defer func() until otherfunc() runs
    env.Defer(func, 'bob')
      # Defer func() until after SConscripts, put in group 'bob'
    env.Defer(func2, after='bob')
      # Defer func2() until after all funcs in 'bob' group have run
    env.Defer(func3, 'sam')
      # Defer func3() until after SConscripts, put in group 'sam'
    env.Defer('bob', after='sam')
      # Defer all functions in group 'bob' until after all functions in group
      # 'sam' have run.
    env.Defer(func4, after=['bob', 'sam'])
      # Defer func4() until after all functions in groups 'bob' and 'sam' have
      # run.
  """
  # Get name of group to defer and/or the a function
  name = None
  func = None
  for a in args:
    if isinstance(a, str):
      name = a
    elif isinstance(a, types.FunctionType):
      func = a
  # A bare function defers into a group named after itself.
  if func and not name:
    name = func.__name__
  # TODO: Why not allow multiple functions? Should be ok
  # Get list of names and/or functions this function should defer until after
  after = []
  for a in self.Flatten(kwargs.get('after')):
    if isinstance(a, str):
      # TODO: Should check if '$' in a, and if so, subst() it and recurse into
      # it.
      after.append(a)
    elif isinstance(a, types.FunctionType):
      # Functions are tracked by their group name (see above).
      after.append(a.__name__)
    elif a is not None:
      # None comes from Flatten() when no after= was given; anything else
      # here is an unsupported dependency specification.
      raise ValueError('Defer after=%r is not a function or name' % a)
  # Find the deferred function
  defer_groups = GetDeferGroups(self)
  if name not in defer_groups:
    defer_groups[name] = DeferGroup()
  group = defer_groups[name]
  # If we were given a function, also save environment and current directory
  if func:
    group.func_env_cwd.append((func, self, os.getcwd()))
  # Add dependencies for the function
  group.after.update(after)
  # If we are already inside a call to ExecuteDefer(), any functions which are
  # deferring until after the current function must also be deferred until
  # after this new function. In short, this means that if b() defers until
  # after a() and a() calls Defer() to defer c(), then b() must also defer
  # until after c().
  if _execute_defer_context and name != _execute_defer_context:
    for other_name, other_group in GetDeferGroups(self).items():
      if other_name == name:
        continue # Don't defer after ourselves
      if _execute_defer_context in other_group.after:
        other_group.after.add(name)
def generate(env):
  # NOTE: SCons requires the use of this name, which fails gpylint.
  """SCons entry point for this tool."""
  # Storage for the per-environment defer groups.
  env.Append(_DEFER_GROUPS={})
  # Expose the defer API as environment methods.
  for method in (Defer, ExecuteDefer, GetDeferRoot, PrintDefer, SetDeferRoot):
    env.AddMethod(method)
| |
# Copyright (c) 2009, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool import steps
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.config import urls
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.comments import bug_comment_from_commit_text
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
from webkitpy.common.system.deprecated_logging import error, log
class Clean(AbstractSequencedCommand):
    # Single-step command: discard local changes in the working copy.
    name = "clean"
    help_text = "Clean the working copy"
    steps = [
        steps.CleanWorkingDirectory,
    ]
    def _prepare_state(self, options, args, tool):
        # Always force a full clean, regardless of command-line flags
        # (presumably consumed by CleanWorkingDirectory — confirm).
        options.force_clean = True
class Update(AbstractSequencedCommand):
    # Cleans the working copy, then updates it.
    name = "update"
    help_text = "Update working copy (used internally)"
    steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
    ]
class Build(AbstractSequencedCommand):
    # Cleans, updates, and builds the working copy.
    name = "build"
    help_text = "Update working copy and build"
    steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.Build,
    ]
    def _prepare_state(self, options, args, tool):
        # Force the build on (presumably read by the Build step — confirm).
        options.build = True
class BuildAndTest(AbstractSequencedCommand):
    # Same as Build, plus a test run at the end.
    name = "build-and-test"
    help_text = "Update working copy, build, and run the tests"
    steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.Build,
        steps.RunTests,
    ]
class Land(AbstractSequencedCommand):
    # Commits the current working-directory diff and updates the bug, if any.
    name = "land"
    help_text = "Land the current working directory diff and updates the associated bug if any"
    argument_names = "[BUGID]"
    show_in_main_help = True
    steps = [
        steps.AddSvnMimetypeForPng,
        steps.UpdateChangeLogsWithReviewer,
        steps.ValidateReviewer,
        steps.ValidateChangeLogs, # We do this after UpdateChangeLogsWithReviewer to avoid not having to cache the diff twice.
        steps.Build,
        steps.RunTests,
        steps.Commit,
        steps.CloseBugForLandDiff,
    ]
    long_help = """land commits the current working copy diff (just as svn or git commit would).
land will NOT build and run the tests before committing, but you can use the --build option for that.
If a bug id is provided, or one can be found in the ChangeLog land will update the bug after committing."""
    def _prepare_state(self, options, args, tool):
        # NOTE(review): uses self._tool here but the 'tool' parameter below —
        # presumably the same object; confirm.
        changed_files = self._tool.scm().changed_files(options.git_commit)
        return {
            "changed_files": changed_files,
            # An explicit BUGID argument wins; otherwise parse it from the ChangeLog.
            "bug_id": (args and args[0]) or tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files),
        }
class LandCowboy(AbstractSequencedCommand):
    # Like land, but first prepares a ChangeLog and opens it in an editor.
    name = "land-cowboy"
    help_text = "Prepares a ChangeLog and lands the current working directory diff."
    steps = [
        steps.PrepareChangeLog,
        steps.EditChangeLog,
        steps.CheckStyle,
        steps.ConfirmDiff,
        steps.Build,
        steps.RunTests,
        steps.Commit,
        steps.CloseBugForLandDiff,
    ]
    def _prepare_state(self, options, args, tool):
        # Exclude ChangeLog checks from check-webkit-style for this command.
        options.check_style_filter = "-changelog"
class LandCowhand(LandCowboy):
    # Gender-blind term for cowboy, see: http://en.wiktionary.org/wiki/cowhand
    # Identical behavior to land-cowboy; only the command name differs.
    name = "land-cowhand"
class CheckStyleLocal(AbstractSequencedCommand):
    # Style-checks the local diff without touching the working copy.
    name = "check-style-local"
    help_text = "Run check-webkit-style on the current working directory diff"
    steps = [
        steps.CheckStyle,
    ]
class AbstractPatchProcessingCommand(AbstractDeclarativeCommand):
    # Subclasses must implement the methods below. We don't declare them here
    # because we want to be able to implement them with mix-ins.
    #
    # pylint: disable-msg=E1101
    # def _fetch_list_of_patches_to_process(self, options, args, tool):
    # def _prepare_to_process(self, options, args, tool):
    # def _process_patch(self, options, args, tool):

    @staticmethod
    def _collect_patches_by_bug(patches):
        """Group the given patches into a dict keyed by bug id."""
        bugs_to_patches = {}
        for patch in patches:
            bugs_to_patches.setdefault(patch.bug_id(), []).append(patch)
        return bugs_to_patches

    def execute(self, options, args, tool):
        """Fetch the requested patches and process each in turn."""
        self._prepare_to_process(options, args, tool)
        patches = self._fetch_list_of_patches_to_process(options, args, tool)
        # It's nice to print out total statistics.
        bugs_to_patches = self._collect_patches_by_bug(patches)
        log("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches))))
        for patch in patches:
            self._process_patch(patch, options, args, tool)
class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand):
    # Step lists run once per command (prepare) and once per patch (main).
    prepare_steps = None
    main_steps = None

    def __init__(self):
        self._prepare_sequence = StepSequence(self.prepare_steps)
        self._main_sequence = StepSequence(self.main_steps)
        # Expose the union of options understood by both sequences.  (A
        # redundant dead-store initialisation of 'options' was removed.)
        options = sorted(set(self._prepare_sequence.options() + self._main_sequence.options()))
        AbstractPatchProcessingCommand.__init__(self, options)

    def _prepare_to_process(self, options, args, tool):
        # Run the one-time preparation steps before any patch is processed.
        self._prepare_sequence.run_and_handle_errors(tool, options)

    def _process_patch(self, patch, options, args, tool):
        # Run the main steps for a single patch.
        state = {"patch": patch}
        self._main_sequence.run_and_handle_errors(tool, options, state)
class ProcessAttachmentsMixin(object):
    def _fetch_list_of_patches_to_process(self, options, args, tool):
        # Each argument is an attachment (patch) id; fetch each from bugzilla.
        return [tool.bugs.fetch_attachment(patch_id) for patch_id in args]
class ProcessBugsMixin(object):
    def _fetch_list_of_patches_to_process(self, options, args, tool):
        # Prefer reviewed patches; only when no bug has any reviewed patch
        # do we fall back to collecting every patch on the given bugs.
        all_patches = []
        for bug_id in args:
            patches = tool.bugs.fetch_bug(bug_id).reviewed_patches()
            log("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id))
            all_patches.extend(patches)
        if not all_patches:
            log("No reviewed patches found, looking for unreviewed patches.")
            for bug_id in args:
                patches = tool.bugs.fetch_bug(bug_id).patches()
                log("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
                all_patches.extend(patches)
        return all_patches
class ProcessURLsMixin(object):
    def _fetch_list_of_patches_to_process(self, options, args, tool):
        """Fetch patches named by bug and/or attachment URLs."""
        all_patches = []
        for url in args:
            # A URL may name a bug (take all of its patches) ...
            bug_id = urls.parse_bug_id(url)
            if bug_id:
                patches = tool.bugs.fetch_bug(bug_id).patches()
                log("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
                all_patches += patches
            # ... and/or a single attachment.
            attachment_id = urls.parse_attachment_id(url)
            if attachment_id:
                # fetch_attachment returns one attachment object (see its use
                # in ProcessAttachmentsMixin), so append it rather than using
                # list concatenation with "+=".
                all_patches.append(tool.bugs.fetch_attachment(attachment_id))
        return all_patches
class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    # Applies each attachment to a clean tree and style-checks it.
    name = "check-style"
    help_text = "Run check-webkit-style on the specified attachments"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.ApplyPatch,
        steps.CheckStyle,
    ]
class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    # Applies each attachment to a clean tree and builds it.
    name = "build-attachment"
    help_text = "Apply and build patches from bugzilla"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.ApplyPatch,
        steps.Build,
    ]
class BuildAndTestAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    # Like build-attachment, plus a test run per attachment.
    name = "build-and-test-attachment"
    help_text = "Apply, build, and test patches from bugzilla"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.ApplyPatch,
        steps.Build,
        steps.RunTests,
    ]
class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand):
    # Shared base for commands that apply patches locally, preserving any
    # existing local commits.
    prepare_steps = [
        steps.EnsureLocalCommitIfNeeded,
        steps.CleanWorkingDirectoryWithLocalCommits,
        steps.Update,
    ]
    main_steps = [
        steps.ApplyPatchWithLocalCommit,
    ]
    long_help = """Updates the working copy.
Downloads and applies the patches, creating local commits if necessary."""
class ApplyAttachment(AbstractPatchApplyingCommand, ProcessAttachmentsMixin):
    # Applies attachments (by id) to the local working directory.
    name = "apply-attachment"
    help_text = "Apply an attachment to the local working directory"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    show_in_main_help = True
class ApplyFromBug(AbstractPatchApplyingCommand, ProcessBugsMixin):
    # Applies the reviewed patches of the given bugs to the working directory.
    name = "apply-from-bug"
    help_text = "Apply reviewed patches from provided bugs to the local working directory"
    argument_names = "BUGID [BUGIDS]"
    show_in_main_help = True
class ApplyWatchList(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    # Applies each attachment and runs the watchlist over it.
    name = "apply-watchlist"
    help_text = "Applies the watchlist to the specified attachments"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    main_steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.ApplyPatch,
        steps.ApplyWatchList,
    ]
    # Fixed a quadruple-quote typo ("""") that put a stray literal '"' at the
    # start of this help text.
    long_help = """Applies the watchlist to the specified attachments.
Downloads the attachment, applies it locally, runs the watchlist against it, and updates the bug with the result."""
class AbstractPatchLandingCommand(AbstractPatchSequencingCommand):
    # Shared landing pipeline: apply, validate, build, test, commit, close.
    main_steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.ApplyPatch,
        steps.ValidateChangeLogs,
        steps.ValidateReviewer,
        steps.Build,
        steps.RunTests,
        steps.Commit,
        steps.ClosePatch,
        steps.CloseBug,
    ]
    long_help = """Checks to make sure builders are green.
Updates the working copy.
Applies the patch.
Builds.
Runs the layout tests.
Commits the patch.
Clears the flags on the patch.
Closes the bug if no patches are marked for review."""
class LandAttachment(AbstractPatchLandingCommand, ProcessAttachmentsMixin):
    # Lands the specified attachments via the shared landing pipeline.
    name = "land-attachment"
    help_text = "Land patches from bugzilla, optionally building and testing them first"
    argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
    show_in_main_help = True
class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin):
    # Lands all (reviewed) patches of the given bugs.
    name = "land-from-bug"
    help_text = "Land all patches on the given bugs, optionally building and testing them first"
    argument_names = "BUGID [BUGIDS]"
    show_in_main_help = True
class LandFromURL(AbstractPatchLandingCommand, ProcessURLsMixin):
    # Lands patches identified by bug or attachment URLs.
    name = "land-from-url"
    help_text = "Land all patches on the given URLs, optionally building and testing them first"
    argument_names = "URL [URLS]"
class ValidateChangelog(AbstractSequencedCommand):
    # Sanity-checks ChangeLogs and reviewers for the current diff.
    name = "validate-changelog"
    help_text = "Validate that the ChangeLogs and reviewers look reasonable"
    long_help = """Examines the current diff to see whether the ChangeLogs
    and the reviewers listed in the ChangeLogs look reasonable.
    """
    steps = [
        steps.ValidateChangeLogs,
        steps.ValidateReviewer,
    ]
class AbstractRolloutPrepCommand(AbstractSequencedCommand):
    # Shared state preparation for the rollout family of commands.
    argument_names = "REVISION [REVISIONS] REASON"
    def _commit_info(self, revision):
        """Look up commit info for a revision, logging whether a bug id was found."""
        commit_info = self._tool.checkout().commit_info_for_revision(revision)
        if commit_info and commit_info.bug_id():
            # Note: Don't print a bug URL here because it will confuse the
            #       SheriffBot because the SheriffBot just greps the output
            #       of create-rollout for bug URLs.  It should do better
            #       parsing instead.
            log("Preparing rollout for bug %s." % commit_info.bug_id())
        else:
            log("Unable to parse bug number from diff.")
        return commit_info
    def _prepare_state(self, options, args, tool):
        # NOTE(review): only args[0] is split for revision numbers, so
        # multiple revisions are expected as one whitespace-separated
        # argument; args[1] is the rollout reason.  Confirm against callers.
        revision_list = []
        for revision in str(args[0]).split():
            if revision.isdigit():
                revision_list.append(int(revision))
            else:
                raise ScriptError(message="Invalid svn revision number: " + revision)
        revision_list.sort()
        # We use the earliest revision for the bug info
        earliest_revision = revision_list[0]
        state = {
            "revision": earliest_revision,
            "revision_list": revision_list,
            "reason": args[1],
        }
        commit_info = self._commit_info(earliest_revision)
        if commit_info:
            state["bug_id"] = commit_info.bug_id()
            # CC everyone responsible for the original commit on the rollout.
            cc_list = sorted([party.bugzilla_email()
                            for party in commit_info.responsible_parties()
                            if party.bugzilla_email()])
            # FIXME: We should used the list as the canonical representation.
            state["bug_cc"] = ",".join(cc_list)
        return state
class PrepareRollout(AbstractRolloutPrepCommand):
    # Reverts revisions locally and prepares (but does not land) ChangeLogs.
    name = "prepare-rollout"
    help_text = "Revert the given revision(s) in the working copy and prepare ChangeLogs with revert reason"
    long_help = """Updates the working copy.
    Applies the inverse diff for the provided revision(s).
    Creates an appropriate rollout ChangeLog, including a trac link and bug link.
    """
    steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.RevertRevision,
        steps.PrepareChangeLogForRevert,
    ]
class CreateRollout(AbstractRolloutPrepCommand):
    # Files a new tracking bug and uploads a rollout patch to it.
    name = "create-rollout"
    help_text = "Creates a bug to track the broken SVN revision(s) and uploads a rollout patch."
    steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.RevertRevision,
        steps.CreateBug,
        steps.PrepareChangeLogForRevert,
        steps.PostDiffForRevert,
    ]
    def _prepare_state(self, options, args, tool):
        state = AbstractRolloutPrepCommand._prepare_state(self, options, args, tool)
        # Currently, state["bug_id"] points to the bug that caused the
        # regression.  We want to create a new bug that blocks the old bug
        # so we move state["bug_id"] to state["bug_blocked"] and delete the
        # old state["bug_id"] so that steps.CreateBug will actually create
        # the new bug that we want (and subsequently store its bug id into
        # state["bug_id"])
        state["bug_blocked"] = state["bug_id"]
        del state["bug_id"]
        state["bug_title"] = "REGRESSION(r%s): %s" % (state["revision"], state["reason"])
        state["bug_description"] = "%s broke the build:\n%s" % (urls.view_revision_url(state["revision"]), state["reason"])
        # FIXME: If we had more context here, we could link to other open bugs
        #        that mention the test that regressed.
        if options.parent_command == "sheriff-bot":
            state["bug_description"] += """
This is an automatic bug report generated by the sheriff-bot. If this bug
report was created because of a flaky test, please file a bug for the flaky
test (if we don't already have one on file) and dup this bug against that bug
so that we can track how often these flaky tests case pain.
"Only you can prevent forest fires." -- Smokey the Bear
"""
        return state
class Rollout(AbstractRolloutPrepCommand):
    # Full interactive rollout: revert, edit, confirm, build, commit, re-open.
    name = "rollout"
    show_in_main_help = True
    help_text = "Revert the given revision(s) in the working copy and optionally commit the revert and re-open the original bug"
    long_help = """Updates the working copy.
    Applies the inverse diff for the provided revision.
    Creates an appropriate rollout ChangeLog, including a trac link and bug link.
    Opens the generated ChangeLogs in $EDITOR.
    Shows the prepared diff for confirmation.
    Commits the revert and updates the bug (including re-opening the bug if necessary)."""
    steps = [
        steps.CleanWorkingDirectory,
        steps.Update,
        steps.RevertRevision,
        steps.PrepareChangeLogForRevert,
        steps.EditChangeLog,
        steps.ConfirmDiff,
        steps.Build,
        steps.Commit,
        steps.ReopenBugAfterRollout,
    ]
| |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from rez.solver import Solver, SolverStatus
from rez.package_repository import package_repository_manager
from rez.packages import get_variant, get_last_release_time
from rez.package_filter import PackageFilterList, TimestampRule
from rez.utils.memcached import memcached_client, pool_memcached_connections
from rez.utils.logging_ import log_duration
from rez.config import config
from rez.vendor.enum import Enum
from rez.vendor.version.requirement import Requirement
from contextlib import contextmanager
from hashlib import sha1
class ResolverStatus(Enum):
    """ Enum to represent the current state of a resolver instance. The enum
    also includes a human readable description of what the state represents.
    """
    # Each member's value is a 1-tuple so that the Enum machinery unpacks it
    # into the __init__(description) argument below.
    pending = ("The resolve has not yet started.", )
    solved = ("The resolve has completed successfully.", )
    failed = ("The resolve is not possible.", )
    aborted = ("The resolve was stopped by the user (via callback).", )
    def __init__(self, description):
        # Human-readable description, e.g. ResolverStatus.solved.description.
        self.description = description
class Resolver(object):
"""The package resolver.
The Resolver uses a combination of Solver(s) and cache(s) to resolve a
package request as quickly as possible.
"""
    def __init__(self, context, package_requests, package_paths, package_filter=None,
                 package_orderers=None, timestamp=0, callback=None, building=False,
                 verbosity=False, buf=None, package_load_callback=None, caching=True,
                 suppress_passive=False, print_stats=False):
        """Create a Resolver.

        Args:
            context: Context object this resolve belongs to; forwarded to
                variant lookups (see `_get_variant`).
            package_requests: List of Requirement objects representing the
                request.
            package_paths: List of paths to search for pkgs.
            package_filter (`PackageFilterList`): Package filter.
            package_orderers (list of `PackageOrder`): Custom package ordering.
            timestamp (int): If nonzero, packages released after this epoch
                time are excluded via a `TimestampRule`.
            callback: See `Solver`.
            package_load_callback: If not None, this callable will be called
                prior to each package being loaded. It is passed a single
                `Package` object.
            building: True if we're resolving for a build.
            verbosity: Verbosity setting (presumably passed to the solver
                created in `_solve` — confirm).
            buf: Output stream (presumably for solver output — confirm).
            caching: If True, cache(s) may be used to speed the resolve. If
                False, caches will not be used.
            suppress_passive (bool): See `Solver`.
            print_stats (bool): If true, print advanced solver stats at the end.
        """
        self.context = context
        self.package_requests = package_requests
        self.package_paths = package_paths
        self.timestamp = timestamp
        self.callback = callback
        self.package_orderers = package_orderers
        self.package_load_callback = package_load_callback
        self.building = building
        self.verbosity = verbosity
        self.caching = caching
        self.buf = buf
        self.suppress_passive = suppress_passive
        self.print_stats = print_stats
        # store hash of package orderers. This is used in the memcached key
        if package_orderers:
            sha1s = ''.join(x.sha1 for x in package_orderers)
            self.package_orderers_hash = sha1(sha1s.encode("utf8")).hexdigest()
        else:
            self.package_orderers_hash = ''
        # store hash of pre-timestamp-combined package filter. This is used in
        # the memcached key
        if package_filter:
            self.package_filter_hash = package_filter.sha1
        else:
            self.package_filter_hash = ''
        # combine timestamp and package filter into single filter
        if self.timestamp:
            if package_filter:
                self.package_filter = package_filter.copy()
            else:
                self.package_filter = PackageFilterList()
            rule = TimestampRule.after(self.timestamp)
            self.package_filter.add_exclusion(rule)
        else:
            self.package_filter = package_filter
        # resolve result state, populated by solve() / _set_result()
        self.status_ = ResolverStatus.pending
        self.resolved_packages_ = None
        self.resolved_ephemerals_ = None
        self.failure_description = None
        self.graph_ = None
        self.from_cache = False
        self.memcached_servers = config.memcached_uri if config.resolve_caching else None
        self.solve_time = 0.0  # time spent solving
        self.load_time = 0.0   # time spent loading package resources
        self._print = config.debug_printer("resolve_memcache")
@pool_memcached_connections
def solve(self):
"""Perform the solve.
"""
with log_duration(self._print, "memcache get (resolve) took %s"):
solver_dict = self._get_cached_solve()
if solver_dict:
self.from_cache = True
self._set_result(solver_dict)
else:
self.from_cache = False
solver = self._solve()
solver_dict = self._solver_to_dict(solver)
self._set_result(solver_dict)
with log_duration(self._print, "memcache set (resolve) took %s"):
self._set_cached_solve(solver_dict)
    @property
    def status(self):
        """Return the current status of the resolve.

        Returns:
            ResolverStatus.
        """
        # status_ starts as ResolverStatus.pending and is updated by
        # _set_result() (not visible here).
        return self.status_
    @property
    def resolved_packages(self):
        """Get the list of resolved packages.

        Returns:
            List of `PackageVariant` objects, or None if the resolve has not
            completed.
        """
        return self.resolved_packages_
    @property
    def resolved_ephemerals(self):
        """Get the list of resolved ephemerals.

        Returns:
            List of `Requirement` objects, or None if the resolve has not
            completed.
        """
        return self.resolved_ephemerals_
    @property
    def graph(self):
        """Return the resolve graph.

        The resolve graph shows unsuccessful as well as successful resolves.

        Returns:
            A pygraph.digraph object, or None if the solve has not completed.
        """
        return self.graph_
    def _get_variant(self, variant_handle):
        # Resolve a variant handle to a variant within this resolver's context.
        return get_variant(variant_handle, context=self.context)
    def _get_cached_solve(self):
        """Find a memcached resolve.

        If there is NOT a resolve timestamp:
        - fetch a non-timestamped memcache entry;
        - if no entry, then fail;
        - if packages have changed, then:
          - delete the entry;
          - fail;
        - if no packages in the entry have been released since, then
          - use the entry and return;
        - else:
          - delete the entry;
          - fail.

        If there IS a resolve timestamp (let us call this T):
        - fetch a non-timestamped memcache entry;
        - if entry then:
          - if no packages have changed, then:
            - if no packages in the entry have been released since:
              - if no packages in the entry were released after T, then
                - use the entry and return;
            - else:
              - delete the entry;
          - else:
            - delete the entry;
        - fetch a timestamped (T) memcache entry;
        - if no entry, then fail;
        - if packages have changed, then:
          - delete the entry;
          - fail;
        - else:
          - use the entry.

        This behaviour exists specifically so that resolves that use a
        timestamp but set that to the current time, can be reused by other
        resolves if nothing has changed. Older resolves however, can only be
        reused if the timestamp matches exactly (but this might happen a lot -
        consider a workflow where a work area is tied down to a particular
        timestamp in order to 'lock' it from any further software releases).
        """
        if not (self.caching and self.memcached_servers):
            return None

        # these caches avoids some potentially repeated file stats
        variant_states = {}
        last_release_times = {}

        def _hit(data):
            # entry accepted - return only the solver dict portion
            solver_dict, _, _ = data
            return solver_dict

        def _miss():
            self._print("No cache key retrieved")
            return None

        def _delete_cache_entry(key):
            # invalidate a stale entry so later resolves never see it
            with self._memcached_client() as client:
                client.delete(key)
            self._print("Discarded entry: %r", key)

        def _retrieve(timestamped):
            # fetch the (optionally timestamp-keyed) entry for this resolve
            key = self._memcache_key(timestamped=timestamped)
            self._print("Retrieving memcache key: %r", key)
            with self._memcached_client() as client:
                data = client.get(key)
            return key, data

        def _packages_changed(key, data):
            # True if any variant in the cached solve no longer matches its
            # current on-disk state (modified, moved or deleted packages)
            solver_dict, _, variant_states_dict = data
            for variant_handle in solver_dict.get("variant_handles", []):
                variant = self._get_variant(variant_handle)
                old_state = variant_states_dict.get(variant.name)

                new_state = variant_states.get(variant)
                if new_state is None:
                    try:
                        repo = variant.resource._repository
                        new_state = repo.get_variant_state_handle(variant.resource)
                    except (IOError, OSError) as e:
                        # if, ie a package file was deleted on disk, then
                        # an IOError or OSError will be raised when we try to
                        # read from it - assume that the packages have changed!
                        self._print("Error loading %r (assuming cached state "
                                    "changed): %s", variant.qualified_name,
                                    e)
                        return True
                    variant_states[variant] = new_state

                if old_state != new_state:
                    self._print("%r has been modified", variant.qualified_name)
                    return True
            return False

        def _releases_since_solve(key, data):
            # True if any package in the cached solve has had a newer release
            # since the entry was stored
            _, release_times_dict, _ = data
            for package_name, release_time in release_times_dict.items():
                time_ = last_release_times.get(package_name)
                if time_ is None:
                    time_ = get_last_release_time(package_name, self.package_paths)
                    last_release_times[package_name] = time_

                if time_ != release_time:
                    self._print(
                        "A newer version of %r (%d) has been released since the "
                        "resolve was cached (latest release in cache was %d) "
                        "(entry: %r)", package_name, time_, release_time, key)
                    return True
            return False

        def _timestamp_is_earlier(key, data):
            # True if our resolve timestamp predates any release in the entry
            _, release_times_dict, _ = data
            for package_name, release_time in release_times_dict.items():
                if self.timestamp < release_time:
                    self._print("Resolve timestamp (%d) is earlier than %r in "
                                "solve (%d) (entry: %r)", self.timestamp,
                                package_name, release_time, key)
                    return True
            return False

        # first try the non-timestamped entry (see docstring for protocol)
        key, data = _retrieve(False)

        if self.timestamp:
            if data:
                if _packages_changed(key, data) or _releases_since_solve(key, data):
                    _delete_cache_entry(key)
                elif not _timestamp_is_earlier(key, data):
                    return _hit(data)

            # fall back to the timestamped entry
            key, data = _retrieve(True)
            if not data:
                return _miss()
            if _packages_changed(key, data):
                _delete_cache_entry(key)
                return _miss()
            else:
                return _hit(data)
        else:
            if not data:
                return _miss()
            if _packages_changed(key, data) or _releases_since_solve(key, data):
                _delete_cache_entry(key)
                return _miss()
            else:
                return _hit(data)
    @contextmanager
    def _memcached_client(self):
        # Yield a client connected to the configured memcached servers;
        # debug logging follows the global config setting.
        with memcached_client(self.memcached_servers,
                              debug=config.debug_memcache) as client:
            yield client
    def _set_cached_solve(self, solver_dict):
        """Store a solve to memcached.

        If there is NOT a resolve timestamp:
        - store the solve to a non-timestamped entry.

        If there IS a resolve timestamp (let us call this T):
        - if NO newer package in the solve has been released since T,
          then store the solve to a non-timestamped entry;
        - else:
          - store the solve to a timestamped entry.
        """
        if self.status_ != ResolverStatus.solved:
            return  # don't cache failed solves

        if not (self.caching and self.memcached_servers):
            return

        # most recent release times get stored with solve result in the cache
        releases_since_solve = False
        release_times_dict = {}
        variant_states_dict = {}

        for variant in self.resolved_packages_:
            time_ = get_last_release_time(variant.name, self.package_paths)

            # don't cache if a release time isn't known
            if time_ == 0:
                self._print("Did not send memcache key: a repository could "
                            "not provide a most recent release time for %r",
                            variant.name)
                return

            if self.timestamp and self.timestamp < time_:
                releases_since_solve = True

            release_times_dict[variant.name] = time_
            repo = variant.resource._repository
            variant_states_dict[variant.name] = \
                repo.get_variant_state_handle(variant.resource)

        # older solves (where newer releases exist) get a timestamp-specific
        # key so they never shadow the "current" non-timestamped entry
        timestamped = (self.timestamp and releases_since_solve)
        key = self._memcache_key(timestamped=timestamped)
        data = (solver_dict, release_times_dict, variant_states_dict)

        with self._memcached_client() as client:
            client.set(key, data)
        self._print("Sent memcache key: %r", key)
def _memcache_key(self, timestamped=False):
"""Makes a key suitable as a memcache entry."""
request = tuple(map(str, self.package_requests))
repo_ids = []
for path in self.package_paths:
repo = package_repository_manager.get_repository(path)
repo_ids.append(repo.uid)
t = ["resolve",
request,
tuple(repo_ids),
self.package_filter_hash,
self.package_orderers_hash,
self.building,
config.prune_failed_graph]
if timestamped and self.timestamp:
t.append(self.timestamp)
return str(tuple(t))
def _solve(self):
solver = Solver(package_requests=self.package_requests,
package_paths=self.package_paths,
context=self.context,
package_filter=self.package_filter,
package_orderers=self.package_orderers,
callback=self.callback,
package_load_callback=self.package_load_callback,
building=self.building,
verbosity=self.verbosity,
prune_unfailed=config.prune_failed_graph,
buf=self.buf,
suppress_passive=self.suppress_passive,
print_stats=self.print_stats)
solver.solve()
return solver
def _set_result(self, solver_dict):
self.status_ = solver_dict.get("status")
self.graph_ = solver_dict.get("graph")
self.solve_time = solver_dict.get("solve_time")
self.load_time = solver_dict.get("load_time")
self.failure_description = solver_dict.get("failure_description")
self.resolved_packages_ = None
self.resolved_ephemerals_ = None
if self.status_ == ResolverStatus.solved:
# convert solver.Variants to packages.Variants
self.resolved_packages_ = []
for variant_handle in solver_dict.get("variant_handles", []):
variant = self._get_variant(variant_handle)
self.resolved_packages_.append(variant)
self.resolved_ephemerals_ = []
for req_str in solver_dict.get("ephemerals", []):
req = Requirement(req_str)
self.resolved_ephemerals_.append(req)
@classmethod
def _solver_to_dict(cls, solver):
graph_ = solver.get_graph()
solve_time = solver.solve_time
load_time = solver.load_time
failure_description = None
variant_handles = None
ephemerals = None
st = solver.status
if st == SolverStatus.unsolved:
status_ = ResolverStatus.aborted
failure_description = solver.abort_reason
elif st == SolverStatus.failed:
status_ = ResolverStatus.failed
failure_description = solver.failure_description()
elif st == SolverStatus.solved:
status_ = ResolverStatus.solved
variant_handles = []
for solver_variant in solver.resolved_packages:
variant_handle_dict = solver_variant.handle
variant_handles.append(variant_handle_dict)
ephemerals = []
for ephemeral in solver.resolved_ephemerals:
ephemerals.append(str(ephemeral))
return dict(
status=status_,
graph=graph_,
solve_time=solve_time,
load_time=load_time,
failure_description=failure_description,
variant_handles=variant_handles,
ephemerals=ephemerals
)
| |
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# Author: Christopher Moody <chrisemoody@gmail.com>
# Author: Nick Travers <nickt@squareup.com>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Distances of samples are stored as condensed matrices, i.e.
        we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Binary-search per-point bandwidths so that each conditional
    # distribution approximately attains the requested perplexity.
    distances = astype(distances, np.float32, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, None, desired_perplexity, verbose)

    # Symmetrize, renormalize, and clip away zeros so later logs are safe.
    P_sym = conditional_P + conditional_P.T
    normalizer = np.maximum(np.sum(P_sym), MACHINE_EPSILON)
    return np.maximum(squareform(P_sym) / normalizer, MACHINE_EPSILON)
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
    """Compute joint probabilities p_ij from distances using just nearest
    neighbors.

    This method is approximately equal to _joint_probabilities. The latter
    is O(N**2) in the number of samples, whereas limiting the conditional
    search to nearest neighbors improves this substantially to O(uN), where
    u is the number of neighbors considered per sample.

    Parameters
    ----------
    distances : array, shape (n_samples * (n_samples-1) / 2,)
        Distances of samples are stored as condensed matrices, i.e.
        we omit the diagonal and duplicate entries and store everything
        in a one-dimensional array.

    neighbors : array, shape (n_samples, K)
        Indices of the K nearest neighbors of each sample.

    desired_perplexity : float
        Desired perplexity of the joint probability distributions.

    verbose : int
        Verbosity level.

    Returns
    -------
    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.
    """
    # Compute conditional probabilities such that they approximately match
    # the desired perplexity
    distances = astype(distances, np.float32, copy=False)
    neighbors = astype(neighbors, np.int64, copy=False)
    conditional_P = _utils._binary_search_perplexity(
        distances, neighbors, desired_perplexity, verbose)
    # sanity check: the binary search must not produce NaN/inf entries
    m = "All probabilities should be finite"
    assert np.all(np.isfinite(conditional_P)), m
    # symmetrize and normalize to obtain the joint distribution
    P = conditional_P + conditional_P.T
    sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
    P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
    assert np.all(np.abs(P) <= 1.0)
    return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
                   skip_num_points=0):
    """t-SNE objective function: gradient of the KL divergence
    of p_ijs and q_ijs and the absolute error.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,)
        Condensed joint probability matrix.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    skip_num_points : int (optional, default:0)
        Indices below `skip_num_points` get no gradient; useful when
        transforming new data while keeping old data fixed.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    X_embedded = params.reshape(n_samples, n_components)

    # Q: heavy-tailed Student's t kernel over pairwise squared distances
    t_dist = pdist(X_embedded, "sqeuclidean")
    t_dist += 1.
    t_dist /= degrees_of_freedom
    t_dist **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(t_dist / (2.0 * np.sum(t_dist)), MACHINE_EPSILON)

    # np.dot(x, y) instead of np.sum(x * y): it dispatches to BLAS.
    # Objective: C (Kullback-Leibler divergence of P and Q)
    kl_divergence = 2.0 * np.dot(P, np.log(P / Q))

    # Gradient: dC/dY
    grad = np.ndarray((n_samples, n_components))
    PQd = squareform((P - Q) * t_dist)
    for i in range(skip_num_points, n_samples):
        np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
    coeff = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad = grad.ravel()
    grad *= coeff

    return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
                         n_components):
    """t-SNE objective function: the absolute error of the
    KL divergence of p_ijs and q_ijs.

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,) or square
        Joint probability matrix (condensed or square form).

    neighbors : array (n_samples, K)
        Not actually required to calculate the divergence; present only to
        match the signature of the gradient function.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij (no gradient).
    """
    X_embedded = params.reshape(n_samples, n_components)

    # Q: heavy-tailed Student's t kernel over pairwise squared distances
    t_dist = pdist(X_embedded, "sqeuclidean")
    t_dist += 1.
    t_dist /= degrees_of_freedom
    t_dist **= (degrees_of_freedom + 1.0) / -2.0
    Q = np.maximum(t_dist / (2.0 * np.sum(t_dist)), MACHINE_EPSILON)

    # accept either a square or condensed P
    if len(P.shape) == 2:
        P = squareform(P)
    # np.dot(x, y) instead of np.sum(x * y): it dispatches to BLAS
    return 2.0 * np.dot(P, np.log(P / Q))
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
                      n_components, angle=0.5, skip_num_points=0,
                      verbose=False):
    """t-SNE objective function: KL divergence of p_ijs and q_ijs.

    Uses Barnes-Hut tree methods to calculate the gradient that
    runs in O(NlogN) instead of O(N^2).

    Parameters
    ----------
    params : array, shape (n_params,)
        Unraveled embedding.

    P : array, shape (n_samples * (n_samples-1) / 2,) or square
        Joint probability matrix (condensed or square form).

    neighbors : int64 array, shape (n_samples, K)
        Element [i, j] gives the index of the jth closest neighbor of
        point i.

    degrees_of_freedom : float
        Degrees of freedom of the Student's-t distribution.

    n_samples : int
        Number of samples.

    n_components : int
        Dimension of the embedded space.

    angle : float (default: 0.5)
        Speed/accuracy trade-off for Barnes-Hut: the angular size (theta)
        below which a distant node is summarized as a single point. Values
        in 0.2 - 0.8 behave similarly; lower is slower, higher less exact.

    skip_num_points : int (optional, default:0)
        Indices below `skip_num_points` get no gradient; useful when
        transforming new data while keeping old data fixed.

    verbose : int
        Verbosity level.

    Returns
    -------
    kl_divergence : float
        Kullback-Leibler divergence of p_ij and q_ij.

    grad : array, shape (n_params,)
        Unraveled gradient of the Kullback-Leibler divergence with respect to
        the embedding.
    """
    params = astype(params, np.float32, copy=False)
    X_embedded = params.reshape(n_samples, n_components)
    neighbors = astype(neighbors, np.int64, copy=False)

    # the C routine wants a square float32 probability matrix
    if len(P.shape) == 1:
        P_square = squareform(P).astype(np.float32)
    else:
        P_square = P.astype(np.float32)

    grad = np.zeros(X_embedded.shape, dtype=np.float32)
    error = _barnes_hut_tsne.gradient(P_square, X_embedded, neighbors,
                                      grad, angle, n_components, verbose,
                                      dof=degrees_of_freedom)
    scale = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
    grad = grad.ravel()
    grad *= scale
    return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    r"""Expresses to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

    .. math::

        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)

    where :math:`r(i, j)` is the rank of the embedded datapoint j
    according to the pairwise distances between the embedded datapoints,
    :math:`U^{(k)}_i` is the set of points that are in the k nearest
    neighbors in the embedded space but not in the original space.

    * "Neighborhood Preservation in Nonlinear Projection Methods: An
      Experimental Study"
      J. Venna, S. Kaski
    * "Learning a Parametric Embedding by Preserving Local Structure"
      L.J.P. van der Maaten

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.

    X_embedded : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.

    n_neighbors : int, optional (default: 5)
        Number of neighbors k that will be considered.

    precomputed : bool, optional (default: False)
        Set this flag if X is a precomputed square distance matrix.

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    if precomputed:
        # assumes a precomputed X already holds pairwise distances - the
        # caller is responsible for consistency with the squared embedded
        # distances below
        dist_X = X
    else:
        dist_X = pairwise_distances(X, squared=True)
    dist_X_embedded = pairwise_distances(X_embedded, squared=True)
    # ranks of every point's neighbors in both spaces (column 0 of the
    # embedded sort is the point itself, hence the 1: slice)
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]

    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            # rank of the j-th embedded neighbor in the original space
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        # only neighbors that fell outside the original k-neighborhood
        # contribute a penalty
        ranks -= n_neighbors
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
                          (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TrucnatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that this
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
X = check_array(X, dtype=np.float32)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
          X_embedded=None, neighbors=None, skip_num_points=0):
    """Runs t-SNE.

    t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
    and the Student's t-distributions Q. The optimization algorithm that
    we use is batch gradient descent with three stages:
    * early exaggeration with momentum 0.5
    * early exaggeration with momentum 0.8
    * final optimization with momentum 0.8

    Parameters
    ----------
    P : array
        Joint probabilities of the input samples (neighbor-restricted
        form when method == 'barnes_hut').
    degrees_of_freedom : float
        Degrees of freedom of the Student's t-distribution.
    n_samples : int
        Number of input samples.
    random_state : RandomState
        Used only to draw the initial embedding when X_embedded is None.
    X_embedded : array, optional
        Initial embedding; drawn randomly when None.
    neighbors : array, optional
        Nearest-neighbor indices; required when method == 'barnes_hut'.
    skip_num_points : int
        Number of leading points whose gradient is not updated.

    Returns
    -------
    X_embedded : array, shape (n_samples, n_components)
        The optimized embedding.
    """
    if X_embedded is None:
        # The embedding is initialized with iid samples from Gaussians with
        # standard deviation 1e-4.
        X_embedded = 1e-4 * random_state.randn(n_samples,
                                               self.n_components)
    params = X_embedded.ravel()
    # Settings shared by all three optimization stages; stage-specific
    # entries are overwritten before each _gradient_descent call below.
    # (A redundant "opt_args = {}" that was immediately overwritten has
    # been removed.)
    opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
                "learning_rate": self.learning_rate,
                "verbose": self.verbose, "n_iter_check": 25,
                "kwargs": dict(skip_num_points=skip_num_points)}
    if self.method == 'barnes_hut':
        m = "Must provide an array of neighbors to use Barnes-Hut"
        assert neighbors is not None, m
        obj_func = _kl_divergence_bh
        objective_error = _kl_divergence_error
        sP = squareform(P).astype(np.float32)
        neighbors = neighbors.astype(np.int64)
        args = [sP, neighbors, degrees_of_freedom, n_samples,
                self.n_components]
        opt_args['args'] = args
        opt_args['min_grad_norm'] = 1e-3
        opt_args['n_iter_without_progress'] = 30
        # Don't always calculate the cost since that calculation
        # can be nearly as expensive as the gradient
        opt_args['objective_error'] = objective_error
        opt_args['kwargs']['angle'] = self.angle
        opt_args['kwargs']['verbose'] = self.verbose
    else:
        obj_func = _kl_divergence
        opt_args['args'] = [P, degrees_of_freedom, n_samples,
                            self.n_components]
        opt_args['min_error_diff'] = 0.0
        opt_args['min_grad_norm'] = 0.0
    # Early exaggeration: inflate P so clusters form tight, well
    # separated groups during the first two stages.
    P *= self.early_exaggeration
    params, error, it = _gradient_descent(obj_func, params, **opt_args)
    opt_args['n_iter'] = 100
    opt_args['momentum'] = 0.8
    opt_args['it'] = it + 1
    params, error, it = _gradient_descent(obj_func, params, **opt_args)
    if self.verbose:
        print("[t-SNE] Error after %d iterations with early "
              "exaggeration: %f" % (it + 1, error))
    # Save the final number of iterations
    self.n_iter_final = it
    # Final optimization: undo the exaggeration and run to completion.
    P /= self.early_exaggeration
    opt_args['n_iter'] = self.n_iter
    opt_args['it'] = it + 1
    params, error, it = _gradient_descent(obj_func, params, **opt_args)
    if self.verbose:
        print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
    X_embedded = params.reshape(n_samples, self.n_components)
    return X_embedded
def fit_transform(self, X, y=None):
    """Fit X into an embedded space and return the embedding.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row.

    Returns
    -------
    X_new : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    """
    # Fit and store the embedding in a single step.
    self.embedding_ = self._fit(X)
    return self.embedding_
def fit(self, X, y=None):
    """Fit X into an embedded space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance
        matrix. Otherwise it contains a sample per row. If the method
        is 'exact', X may be a sparse matrix of type 'csr', 'csc'
        or 'coo'.
    """
    # Delegate to fit_transform and discard the returned embedding;
    # it remains available as self.embedding_.
    self.fit_transform(X)
    return self
| |
from .. import Availability, Class, Constant, Define, Method, Parameter, Type
# Declares the DMPPLY class (Datamine multiple-polygon object) for the
# GX API code generator.
gx_class = Class('DMPPLY',
                 doc="Datamine Multiple polygon object")
gx_methods = {
'Miscellaneous': [
Method('_Clear_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Clear/remove all polygons from the :class:`DMPPLY`.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY")
]),
Method('Copy_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Copy",
return_type=Type.VOID,
parameters = [
Parameter('dest', type="DMPPLY",
doc="Destination"),
Parameter('source', type="DMPPLY",
doc="Source")
]),
Method('Create_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Creates a :class:`DMPPLY` object.",
return_type="DMPPLY",
return_doc="DMPLY Object"),
Method('Destroy_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.PUBLIC,
doc="Destroys the :class:`DMPPLY` object.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` Object")
]),
Method('GetAzimuth_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get the azimuth of a given polygon.",
notes="""
The azimuth is the equivalent section azimuth,
equal to the azimuth of the normal vector plus
90 degrees.
""",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)"),
Parameter('az', type=Type.DOUBLE, is_ref=True,
doc="Azimuth (degrees) (o)")
]),
Method('GetExtents_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get the center, width and height of a given polygon.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)"),
Parameter('x', type=Type.DOUBLE, is_ref=True,
doc="Center point X (o)"),
Parameter('y', type=Type.DOUBLE, is_ref=True,
doc="Center point Y (o)"),
Parameter('z', type=Type.DOUBLE, is_ref=True,
doc="Center point Z (o)"),
Parameter('w', type=Type.DOUBLE, is_ref=True,
doc="Width of polygon (in its plane) (o)"),
Parameter('h', type=Type.DOUBLE, is_ref=True,
doc="Height of polygon (Z extent) (o)")
]),
Method('GetJoins_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get join lines for each vertex in a specific polygon.",
notes="""
If a specific vertex is not joined, the returned value is 0.
If the vertex is joined, then the index of the join line (1 to NJoins)
is returned.
""",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc="Datamine polygon Object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to N)"),
Parameter('vv', type="VV",
doc="INT :class:`VV` of join indices (1 to NJoins).")
]),
Method('GetNormalVectors_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get the normal vectors of a given polygon.",
notes="""
Three normalized vectors are returned.
The first is horizontal, in the plane of the polygon.
The second is in the vertical plane, corresponding to the
"down-dip" direction.
The third is the normal vector to the polygon plane.
""",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)"),
Parameter('x1', type=Type.DOUBLE, is_ref=True,
doc="X component (o) (Horizontal azimuth vector)"),
Parameter('y1', type=Type.DOUBLE, is_ref=True,
doc="Y component (o)"),
Parameter('z1', type=Type.DOUBLE, is_ref=True,
doc="Z component (o)"),
Parameter('x2', type=Type.DOUBLE, is_ref=True,
doc="X component (o) (Down-dip, in the vertical plane)"),
Parameter('y2', type=Type.DOUBLE, is_ref=True,
doc="Y component (o)"),
Parameter('z2', type=Type.DOUBLE, is_ref=True,
doc="Z component (o)"),
Parameter('x3', type=Type.DOUBLE, is_ref=True,
doc="X component (o) (Normal vector)"),
Parameter('y3', type=Type.DOUBLE, is_ref=True,
doc="Y component (o)"),
Parameter('z3', type=Type.DOUBLE, is_ref=True,
doc="Z component (o)")
]),
Method('GetPoly_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get a specific polygon from a :class:`DMPPLY` object.",
notes="Get the number of points from the :class:`VV` length.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP) (i)"),
Parameter('vv_x', type="VV",
doc="X Locations (o)"),
Parameter('vv_y', type="VV",
doc="Y Locations (o)"),
Parameter('vv_z', type="VV",
doc="Z Locations (o)")
]),
Method('GetSwing_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get the swing of a given polygon.",
notes="""
The swing is the equivalent section swing,
equal to zero for vertical plates, and increasing
as the normal vector goes from horizontal upward.
""",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)"),
Parameter('az', type=Type.DOUBLE, is_ref=True,
doc="Swing (degrees) (o)")
]),
Method('GetVertex_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get a vertex location from a :class:`DMPPLY` object.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)"),
Parameter('v', type=Type.INT32_T,
doc="Vertex number (1 to NV)"),
Parameter('x', type=Type.DOUBLE, is_ref=True,
doc="X Location (o)"),
Parameter('y', type=Type.DOUBLE, is_ref=True,
doc="Y Location (o)"),
Parameter('z', type=Type.DOUBLE, is_ref=True,
doc="Z Location (o)")
]),
Method('iNumJoins_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get the number of joining lines in a :class:`DMPPLY` object.",
return_type=Type.INT32_T,
return_doc="Number of joining lines",
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object")
]),
Method('iNumPolys_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get the number of polygons in a :class:`DMPPLY` object.",
notes="""
The value returned is the "NP" used in function descriptions
below.
""",
return_type=Type.INT32_T,
return_doc="Number of polygons",
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object")
]),
Method('iNumVertices_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Get the number of vertices in a polygon.",
notes="""
The value returned is the "NV" used in function descriptions
below.
""",
return_type=Type.INT32_T,
return_doc="Number of vertices in a polygon",
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)")
]),
Method('Load_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Loads a Datamine polygon file.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` Object"),
Parameter('file', type=Type.STRING,
doc="Name of the file to load")
]),
Method('MoveVertex_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Moves a vertex and any associated lines.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)"),
Parameter('v', type=Type.INT32_T,
doc="Vertex number (1 to NV)"),
Parameter('x', type=Type.DOUBLE,
doc="New location X"),
Parameter('y', type=Type.DOUBLE,
doc="New location Y"),
Parameter('z', type=Type.DOUBLE,
doc="New location Z")
]),
Method('ProjectPoly_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Project a polygon onto a vertical plane.",
notes="""
Gives the location in plane coordinates of a selected polygon,
after it has been projected perpendicularly onto the plane.
Plane coodinates: X - horizontal in plane
Y - "vertical" in plane (can be a swing)
Z - horizontal, "perpendicular" to plane (RH)
""",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP)"),
Parameter('xp', type=Type.DOUBLE,
doc="X location of plane origin in 3D"),
Parameter('yp', type=Type.DOUBLE,
doc="Y location of plane origin in 3D"),
Parameter('zp', type=Type.DOUBLE,
doc="Z location of plane origin in 3D"),
Parameter('az', type=Type.DOUBLE,
doc="Azimuth of the plane in degrees"),
Parameter('swing', type=Type.DOUBLE,
doc="Swing of the plane in degrees"),
Parameter('vv_x', type="VV",
doc="X (horizontal along-section locations on vertical plane (o)"),
Parameter('vv_y', type="VV",
doc="Y (vertical locations on vertical plane (o)"),
Parameter('vv_z', type="VV",
doc="Z (horizontal distances perpendicular to the plane (o)")
]),
Method('ReProjectPoly_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Recover polygon locations from 2D locations on vertical plane.",
notes="""
This is the inverse operation of :func:`ProjectPoly_DMPPLY`.
Input the 2D locations on the projected vertical plane. These locations
are projected back onto the original polygon plane.
""",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to lNP) (i)"),
Parameter('xp', type=Type.DOUBLE,
doc="X location of plane origin in 3D (i)"),
Parameter('yp', type=Type.DOUBLE,
doc="Y location of plane origin in 3D (i)"),
Parameter('zp', type=Type.DOUBLE,
doc="Z location of plane origin in 3D (i)"),
Parameter('az', type=Type.DOUBLE,
doc="Azimuth of the plane in degrees (i)"),
Parameter('vv_x', type="VV",
doc="X locations on vertical plane (i)"),
Parameter('vv_y', type="VV",
doc="Y (actually Z) locations on vertical plane (i)"),
Parameter('vv_x3', type="VV",
doc="X Locations of polygon (o)"),
Parameter('vv_y3', type="VV",
doc="Y Locations of polygon (o)"),
Parameter('vv_z3', type="VV",
doc="Z Locations of polygon (o)")
]),
Method('Save_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Save to a Datamine polygon file",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` Object"),
Parameter('file', type=Type.STRING,
doc="Name of the file to save to")
]),
Method('SetPoly_DMPPLY', module='geogxx', version='6.0.0',
availability=Availability.LICENSED,
doc="Set a specific polygon into a :class:`DMPPLY` object.",
notes="Get the number of points from the :class:`VV` length.",
return_type=Type.VOID,
parameters = [
Parameter('dmpply', type="DMPPLY",
doc=":class:`DMPPLY` object"),
Parameter('p', type=Type.INT32_T,
doc="Polygon number (1 to NP) (i)"),
Parameter('vv_x', type="VV",
doc="X Locations (i)"),
Parameter('vv_y', type="VV",
doc="Y Locations (i)"),
Parameter('vv_z', type="VV",
doc="Z Locations (i)")
])
]
}
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from pylib import constants
from pylib import ports
from pylib.base import test_run
from pylib.device import device_errors
from pylib.gtest import gtest_test_instance
from pylib.local import local_test_server_spawner
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
from pylib.utils import apk_helper
from pylib.utils import device_temp_file
# Whether this runner can pass per-run command-line flags to the native test.
_COMMAND_LINE_FLAGS_SUPPORTED = True
# Instrumentation "extras" keys understood by org.chromium.native_test:
# the on-device file holding the command line, the raw flags string, and
# the activity to launch.
_EXTRA_COMMAND_LINE_FILE = (
    'org.chromium.native_test.NativeTestActivity.CommandLineFile')
_EXTRA_COMMAND_LINE_FLAGS = (
    'org.chromium.native_test.NativeTestActivity.CommandLineFlags')
_EXTRA_NATIVE_TEST_ACTIVITY = (
    'org.chromium.native_test.NativeTestInstrumentationTestRunner'
    '.NativeTestActivity')
# Maximum number of tests bundled into one gtest_filter shard, keeping the
# filter string a manageable length.
_MAX_SHARD_SIZE = 256
# TODO(jbudorick): Move this up to the test instance if the net test server is
# handled outside of the APK for the remote_device environment.
_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
  'components_browsertests', 'content_unittests', 'content_browsertests',
  'net_unittests', 'unit_tests'
]
class _ApkDelegate(object):
  """Drives a native test suite packaged inside an instrumentation APK."""

  def __init__(self, apk):
    self._apk = apk
    info = apk_helper.ApkHelper(self._apk)
    self._package = info.GetPackageName()
    self._activity = info.GetActivityName()
    self._runner = info.GetInstrumentationName()
    self._component = '%s/%s' % (self._package, self._runner)
    self._enable_test_server_spawner = False

  def Install(self, device):
    """Installs the APK on |device|."""
    device.Install(self._apk)

  def RunWithFlags(self, device, flags, **kwargs):
    """Runs the instrumentation with |flags| and returns its output."""
    with device_temp_file.DeviceTempFile(device.adb) as command_line_file:
      # First token of the command-line file is the (ignored) program name.
      device.WriteFile(command_line_file.name, '_ %s' % flags)
      launch_extras = {
          _EXTRA_COMMAND_LINE_FILE: command_line_file.name,
          _EXTRA_NATIVE_TEST_ACTIVITY: self._activity,
      }
      return device.StartInstrumentation(
          self._component, extras=launch_extras, raw=False, **kwargs)

  def Clear(self, device):
    """Clears the test package's application state on |device|."""
    device.ClearApplicationState(self._package)
class _ExeDelegate(object):
  """Drives a gtest suite built as a raw executable pushed to the device.

  NOTE(review): the constructor takes (exe, tr) -- the host path of the
  executable first, then the owning test run. Callers must pass the
  arguments in that order.
  """

  def __init__(self, exe, tr):
    self._exe_host_path = exe
    self._exe_file_name = os.path.split(exe)[-1]
    self._exe_device_path = '%s/%s' % (
        constants.TEST_EXECUTABLE_DIR, self._exe_file_name)
    # A sibling '<exe>_deps' directory, when present, holds dependencies
    # that must be pushed alongside the executable.
    deps_host_path = self._exe_host_path + '_deps'
    if os.path.exists(deps_host_path):
      self._deps_host_path = deps_host_path
      self._deps_device_path = self._exe_device_path + '_deps'
    else:
      self._deps_host_path = None
    self._test_run = tr

  def Install(self, device):
    # Pushes the executable (and its deps directory, if any) to the device.
    # TODO(jbudorick): Look into merging this with normal data deps pushing if
    # executables become supported on nonlocal environments.
    host_device_tuples = [(self._exe_host_path, self._exe_device_path)]
    if self._deps_host_path:
      host_device_tuples.append((self._deps_host_path, self._deps_device_path))
    device.PushChangedFiles(host_device_tuples)

  def RunWithFlags(self, device, flags, **kwargs):
    # Runs the on-device executable with |flags| via a shell script and
    # returns the command output.
    cmd = [
      self._test_run.GetTool(device).GetTestWrapper(),
      self._exe_device_path,
      flags,
    ]
    cwd = constants.TEST_EXECUTABLE_DIR
    env = {
      'LD_LIBRARY_PATH':
          '%s/%s_deps' % (constants.TEST_EXECUTABLE_DIR, self._exe_file_name),
    }
    try:
      # Route gcov coverage output to external storage when the build was
      # instrumented; a missing env var or storage just skips this setup.
      gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
      external = device.GetExternalStoragePath()
      env['GCOV_PREFIX'] = '%s/gcov' % external
      env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
    except (device_errors.CommandFailedError, KeyError):
      pass
    # TODO(jbudorick): Switch to just RunShellCommand once perezju@'s CL
    # for long shell commands lands.
    with device_temp_file.DeviceTempFile(device.adb) as script_file:
      script_contents = ' '.join(cmd)
      logging.info('script contents: %r' % script_contents)
      device.WriteFile(script_file.name, script_contents)
      output = device.RunShellCommand(['sh', script_file.name], cwd=cwd,
                                      env=env, **kwargs)
    return output

  def Clear(self, device):
    # Kills any still-running instances of the test executable quietly.
    device.KillAll(self._exe_file_name, blocking=True, timeout=30, quiet=True)
class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
  """Runs a gtest suite across locally attached devices.

  The suite is driven either through an instrumentation APK
  (_ApkDelegate) or a raw pushed executable (_ExeDelegate).
  """

  def __init__(self, env, test_instance):
    assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
    assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
    super(LocalDeviceGtestRun, self).__init__(env, test_instance)

    if self._test_instance.apk:
      self._delegate = _ApkDelegate(self._test_instance.apk)
    elif self._test_instance.exe:
      # BUG FIX: _ExeDelegate's signature is (exe, tr) -- the executable
      # path must come first, then this test run. The arguments were
      # previously passed swapped, breaking executable-based suites.
      self._delegate = _ExeDelegate(self._test_instance.exe, self)
    # Test server spawners for each device, keyed by str(device).
    self._servers = {}

  #override
  def TestPackage(self):
    return self._test_instance.suite

  #override
  def SetUp(self):
    def individual_device_set_up(dev, host_device_tuples):
      # Install test APK.
      self._delegate.Install(dev)

      # Push data dependencies; deps with no explicit device path go to
      # external storage.
      external_storage = dev.GetExternalStoragePath()
      host_device_tuples = [
          (h, d if d is not None else external_storage)
          for h, d in host_device_tuples]
      dev.PushChangedFiles(host_device_tuples)

      # Start a net test server spawner for suites that require one.
      self._servers[str(dev)] = []
      if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
        self._servers[str(dev)].append(
            local_test_server_spawner.LocalTestServerSpawner(
                ports.AllocateTestServerPort(), dev, self.GetTool(dev)))
      for s in self._servers[str(dev)]:
        s.SetUp()

    self._env.parallel_devices.pMap(individual_device_set_up,
                                    self._test_instance.GetDataDependencies())

  #override
  def _ShouldShard(self):
    return True

  #override
  def _CreateShards(self, tests):
    if self._test_instance.suite in gtest_test_instance.BROWSER_TEST_SUITES:
      # Browser test suites are not combined into gtest_filter shards.
      return tests
    else:
      # Round-robin the tests across devices, then cap each shard at
      # _MAX_SHARD_SIZE so the gtest_filter string stays manageable.
      device_count = len(self._env.devices)
      shards = []
      for i in xrange(0, device_count):
        unbounded_shard = tests[i::device_count]
        shards += [unbounded_shard[j:j + _MAX_SHARD_SIZE]
                   for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)]
      return [':'.join(s) for s in shards]

  #override
  def _GetTests(self):
    # Ask the on-device binary for its test list, then apply filtering.
    tests = self._delegate.RunWithFlags(
        self._env.devices[0], '--gtest_list_tests')
    tests = gtest_test_instance.ParseGTestListTests(tests)
    tests = self._test_instance.FilterTests(tests)
    return tests

  #override
  def _RunTest(self, device, test):
    # Run the test.
    output = self._delegate.RunWithFlags(
        device, '--gtest_filter=%s' % test, timeout=900, retries=0)
    for s in self._servers[str(device)]:
      s.Reset()
    self._delegate.Clear(device)

    # Parse the output.
    # TODO(jbudorick): Transition test scripts away from parsing stdout.
    results = self._test_instance.ParseGTestOutput(output)
    return results

  #override
  def TearDown(self):
    def individual_device_tear_down(dev):
      for s in self._servers[str(dev)]:
        s.TearDown()
    self._env.parallel_devices.pMap(individual_device_tear_down)
| |
# Main function to do group photo enhancement
import _init_paths
import tensorflow as tf
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect, im_detect_ori
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import os, sys, cv2
import argparse
from networks.factory import get_network
import ipdb
from facegroup import obtainSimilarityScore
from faceswap import *
# (Yuliang) Background + voc(w/o person) + face
# Class labels in network output order: index 0 is background, indices
# 1-19 are PASCAL VOC categories (minus 'person'), and index 20 is 'face'.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'pottedplant', 'sheep',
           'sofa', 'train', 'tvmonitor', 'face')
def face_detect(sess, net, image_name):
    """Give bounding boxes and quality scores of one given image.

    Runs the detection network on data/demo/<image_name> and returns,
    for the 'face' class only:
      dets  - (N, 5) float32 array of [xmin, ymin, xmax, ymax, score]
              boxes kept after NMS and a 0.9 confidence threshold,
      eye   - per-face scores from column 1 of the eye head,
      smile - per-face scores from column 1 of the smile head.
    (Column 1 is presumably the positive-class probability -- TODO confirm
    against im_detect_ori.)
    """
    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)
    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    # scores, boxes = im_detect(sess, net, im)
    # im_detect_ori additionally returns per-proposal eye and smile scores.
    scores, boxes, eyes, smiles = im_detect_ori(sess, net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])
    # Visualize detections for each class
    # im = im[:, :, (2, 1, 0)]
    # fig, ax = plt.subplots(figsize=(8, 8))
    # ax.imshow(im, aspect='equal')
    CONF_THRESH = 0.9
    NMS_THRESH = 0.3
    # CLASSES[20:] contains only 'face', so this loop body executes once
    # and the return below exits on the first iteration.
    for cls_ind, cls in enumerate(CLASSES[20:]):
        cls_ind += 20 # because we skipped everything except face
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        # Non-maximum suppression, keeping eye/smile rows aligned with dets.
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        eye = eyes[keep, :]
        smile= smiles[keep, :]
        # Drop detections below the confidence threshold.
        inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
        face_num = len(inds)
        print '{} faces detected!'.format(face_num)
        dets = dets[inds, :]
        eye = eye[inds, 1]
        smile = smile[inds, 1]
        return dets, eye, smile
def parse_args():
    """Parse the demo's command-line arguments."""
    ap = argparse.ArgumentParser(description='Faster R-CNN demo')
    ap.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                    help='GPU device id to use [0]')
    ap.add_argument('--cpu', dest='cpu_mode', action='store_true',
                    help='Use CPU mode (overrides --gpu)')
    ap.add_argument('--net', dest='demo_net', default='VGGnet_test',
                    help='Network to use [vgg16]')
    ap.add_argument('--model', dest='model',
                    default='model/VGGnet_fast_rcnn_full_eye_smile_1e-4_iter_70000.ckpt',
                    help='Model path')
    return ap.parse_args()
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
if args.model == ' ':
raise IOError(('Error: Model not found.\n'))
# init session
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
# load network
net = get_network(args.demo_net)
# load model
saver = tf.train.Saver()
saver.restore(sess, args.model)
#sess.run(tf.initialize_all_variables())
print '\n\nLoaded network {:s}'.format(args.model)
# Warmup on a dummy image
im = 128 * np.ones((300, 300, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(sess, net, im)
im_names = ['f2_1.png', 'f2_2.png', 'f2_3.png']
# im_names = ['11.jpg', '22.jpg', '33.jpg']
im_num = len(im_names)
im_info = {}
best_score = 0
best_im = ''
face_num = 0
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Detection for data/demo/{}'.format(im_name)
dets, eyes, smiles = face_detect(sess, net, im_name)
im_info[im_name] = {}
im_info[im_name]['dets'] = dets
im_info[im_name]['eyes'] = eyes
im_info[im_name]['smiles'] = smiles
overall_score = eyes.sum() + smiles.sum()
if overall_score > best_score:
best_score = overall_score
best_im = im_name
face_num = len(eyes)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'The candidate image is: {}'.format(best_im)
print 'The face number of the group is: {}'.format(face_num)
# Use faces from candidate as default best faces
best_faces = [best_im] * face_num
best_scores = []
best_face_box = []
for i in range(face_num):
best_scores.append(im_info[best_im]['eyes'][i] + im_info[best_im]['smiles'][i])
best_face_box.append(im_info[best_im]['dets'][i])
cand_img = cv2.imread('./data/demo/' + best_im)
# for i in range(face_num):
# xmin, ymin, xmax, ymax, _ = im_info[best_im]['dets'][i]
# face = cand_img[ymin:ymax, xmin:xmax, :]
# face = face[:, :, (2, 1, 0)] # BGR -> RGB
# plt.imshow(face)
# plt.show()
# Grouping and find best faces
for im_name in im_names:
if im_name == best_im:
continue
temp_num = len(im_info[im_name]['eyes'])
for i in range(temp_num):
temp_score = im_info[im_name]['eyes'][i] + im_info[im_name]['smiles'][i]
# If the score is too small, just ignore it
if temp_score < min(best_scores):
continue
img = cv2.imread('./data/demo/' + im_name)
xmin, ymin, xmax, ymax, _ = im_info[im_name]['dets'][i]
face_data = img[ymin:ymax, xmin:xmax, :]
largest_sim = 60
match_id = -1 # assume no matching
# Compare with each face in candidate image
for j in range(face_num):
xmin, ymin, xmax, ymax, _ = im_info[best_im]['dets'][j]
temp_face = cand_img[ymin:ymax, xmin:xmax, :]
# plt.imshow(face_data)
# plt.show()
# plt.imshow(temp_face)
# plt.show()
sim = obtainSimilarityScore(face_data, temp_face)
# if j == 3:
# print sim
# plt.imshow(face_data)
# plt.show()
if sim > largest_sim:
largest_sim = sim
match_id = j
# print largest_sim
# No matching
if match_id == -1:
continue
if temp_score > best_scores[match_id]:
best_faces[match_id] = im_name
best_scores[match_id] = temp_score
best_face_box[match_id] = im_info[im_name]['dets'][j]
ipdb.set_trace()
for i, im_name in enumerate(best_faces):
img = cv2.imread('./data/demo/' + im_name)
xmin, ymin, xmax, ymax, _ = best_face_box[i]
img = img[ymin:ymax, xmin:xmax, :]
im = im[:, :, (2, 1, 0)] # BGR -> RGB
# plt.imshow(img)
# plt.show()
# print best_faces
# print best_scores
# print best_face_box
# Face swapping
cand_img = cv2.imread('./data/demo/' + best_im)
for i in range(face_num):
# Don't need to change
if best_faces[i] == best_im:
continue
# target - good, source - bad
target_img = cv2.imread('./data/demo/' + best_faces[i])
xmin, ymin, xmax, ymax, _ = best_face_box[i]
target_face = target_img[ymin:ymax, xmin:xmax, :]
xmin, ymin, xmax, ymax, _ = im_info[best_im]['dets'][i]
source_face = cand_img[ymin:ymax, xmin:xmax, :]
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(PREDICTOR_PATH)
source_landmark = get_landmarks(source_face, predictor)
target_landmark = get_landmarks(target_face, predictor)
M = transformation_from_points(source_landmark[ALIGN_POINTS], target_landmark[ALIGN_POINTS])
mask = get_face_mask(target_face, target_landmark)
warped_mask = warp_im(mask, M, source_face.shape)
combined_mask = numpy.max([get_face_mask(source_face, source_landmark), warped_mask], axis=0)
warped_im2 = warp_im(target_face, M, source_face.shape)
warped_corrected_im2 = correct_colours(source_face, warped_im2, source_landmark)
output_im = source_face * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
cand_img[ymin:ymax, xmin:xmax, :] = output_im
cv2.imwrite('output.jpg', cand_img)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class CastOpTest(test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
elif dtype == np.bool_:
return dtypes.bool
elif dtype == np.complex64:
return dtypes.complex64
elif dtype == np.complex128:
return dtypes.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with test_util.device(use_gpu):
val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
cast = math_ops.cast(val, self._toDataType(dtype), name="cast")
return self.evaluate(cast)
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
np_ans = x.astype(dtype)
tf_ans = self._cast(x, dtype, use_gpu)
self.assertAllEqual(np_ans, tf_ans)
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [
np.float32, np.float64, np.int64, np.complex64, np.complex128
]
else:
type_list = [
np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
self._test(x.astype(np.bool_), np.float32, use_gpu)
self._test(x.astype(np.uint8), np.float32, use_gpu)
if not use_gpu:
self._test(x.astype(np.bool_), np.int32, use_gpu)
self._test(x.astype(np.int32), np.int32, use_gpu)
def _testAll(self, x):
self._testTypes(x, use_gpu=False)
if x.dtype == np.float32 or x.dtype == np.float64:
self._testTypes(x, use_gpu=True)
def testBasic(self):
self._testAll(np.arange(-10, 10).reshape(2, 10))
self._testAll(np.linspace(-10, 10, 17))
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(
np.array([
0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
-f8.resolution
]))
def testBfloat16(self):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.cached_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
with self.cached_session():
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
# Special values like int32max, int64min, inf, -inf, nan casted to
# integer values in somewhat unexpected ways. And they behave
# differently on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(
self._cast(
x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(i4.min, np.float32, i4.min, False)
self._compare(i4.max, np.float32, i4.max, False)
self._compare(i8.min, np.float32, i8.min, False)
self._compare(i8.max, np.float32, i8.max, False)
self._compare(i4.min, np.float64, i4.min, False)
self._compare(i4.max, np.float64, i4.max, False)
self._compare(i8.min, np.float64, i8.min, False)
self._compare(i8.max, np.float64, i8.max, False)
# NOTE: GPU does not support int32/int64 for casting.
def testInfNan(self):
self._compare(np.inf, np.float32, np.inf, False)
self._compare(np.inf, np.float64, np.inf, False)
self._compare(-np.inf, np.float32, -np.inf, False)
self._compare(-np.inf, np.float64, -np.inf, False)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
self._compare(np.inf, np.float32, np.inf, True)
self._compare(np.inf, np.float64, np.inf, True)
self._compare(-np.inf, np.float32, -np.inf, True)
self._compare(-np.inf, np.float64, -np.inf, True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
  def _OpError(self, x, dtype, err):
    """Asserts that evaluating cast(x, dtype) raises an op error matching `err`."""
    with self.assertRaisesOpError(err):
      self.evaluate(math_ops.cast(x, dtype))
  def testNotImplemented(self):
    # int -> string casts are not implemented; the op should raise.
    self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int.*string.*")
def testCastToTypeOfVariable(self):
with self.cached_session():
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(cast))
def testGradients(self):
t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.cached_session():
x = constant_op.constant(1.0, src_t)
def cast(x, dst_t=dst_t):
x = array_ops.identity(x)
x = math_ops.cast(x, dst_t)
return x
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(cast, [x]))
self.assertLess(err, 1e-3)
class SparseTensorCastTest(test.TestCase):
  """Tests casting of SparseTensor values."""

  def testCast(self):
    """Casts an int64 SparseTensor to float32, preserving its structure."""
    indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
    values = constant_op.constant(np.array([1, 2, 3], np.int64))
    dense_shape = constant_op.constant([3], dtypes.int64)
    original = sparse_tensor.SparseTensor(indices, values, dense_shape)
    result = math_ops.cast(original, dtypes.float32)
    # Only the values change dtype; indices and shape are untouched.
    self.assertAllEqual(result.indices, [[0], [1], [2]])
    self.assertAllEqual(result.values, np.array([1, 2, 3], np.float32))
    self.assertAllEqual(result.dense_shape, [3])
class SaturateCastTest(test.TestCase):
  """Tests math_ops.saturate_cast clamping behavior."""

  def testSaturate(self):
    """Saturating casts clamp inputs to the target dtype's range."""
    in_types = (dtypes.float32,)
    out_types = (dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32)
    for in_type in in_types:
      for out_type in out_types:
        lo, hi = in_type.min, in_type.max
        source = constant_op.constant(
            [lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
        saturated = math_ops.saturate_cast(source, dtype=out_type)
        self.assertEqual(saturated.dtype, out_type)
        source_np, saturated_np = self.evaluate([source, saturated])
        # Reference result: clamp to [out_type.min, out_type.max].
        clamped = np.maximum(out_type.min,
                             np.minimum(out_type.max, source_np))
        self.assertAllEqual(clamped, saturated_np)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| |
"""Tests for the Device Registry."""
import time
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import RequiredParameterMissing
from homeassistant.helpers import device_registry, entity_registry
from tests.common import (
MockConfigEntry,
flush_store,
mock_area_registry,
mock_device_registry,
)
@pytest.fixture
def registry(hass):
    """Return an empty, loaded, mocked device registry."""
    return mock_device_registry(hass)
@pytest.fixture
def area_registry(hass):
    """Return an empty, loaded, mocked area registry."""
    return mock_area_registry(hass)
@pytest.fixture
def update_events(hass):
    """Capture device registry update events in a list."""
    captured = []

    @callback
    def _capture(event):
        captured.append(event.data)

    hass.bus.async_listen(device_registry.EVENT_DEVICE_REGISTRY_UPDATED, _capture)
    return captured
async def test_get_or_create_returns_same_entry(
    hass, registry, area_registry, update_events
):
    """Make sure we do not duplicate entries."""
    # Create a device with full details; the suggested area is created in
    # the area registry and attached to the device.
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        sw_version="sw-version",
        name="name",
        manufacturer="manufacturer",
        model="model",
        suggested_area="Game Room",
    )
    # Same identifiers with a new connection: merges into the same device.
    entry2 = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "11:22:33:66:77:88")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
        suggested_area="Game Room",
    )
    # Lookup by an existing connection alone: still the same device.
    entry3 = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    game_room_area = area_registry.async_get_area_by_name("Game Room")
    assert game_room_area is not None
    assert len(area_registry.areas) == 1
    assert len(registry.devices) == 1
    assert entry.area_id == game_room_area.id
    assert entry.id == entry2.id
    assert entry.id == entry3.id
    assert entry.identifiers == {("bridgeid", "0123")}
    assert entry2.area_id == game_room_area.id
    # The sparse third call still returns the fully populated entry.
    assert entry3.manufacturer == "manufacturer"
    assert entry3.model == "model"
    assert entry3.name == "name"
    assert entry3.sw_version == "sw-version"
    assert entry3.suggested_area == "Game Room"
    assert entry3.area_id == game_room_area.id
    await hass.async_block_till_done()
    # Only 2 update events. The third entry did not generate any changes.
    assert len(update_events) == 2
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry.id
    # The update event records the previous value of changed fields
    # (connections before the second MAC was added, normalized lowercase).
    assert update_events[1]["changes"] == {
        "connections": {("mac", "12:34:56:ab:cd:ef")}
    }
async def test_requirement_for_identifier_or_connection(registry):
    """Require at least one of identifiers or connections for a device."""
    # A connection alone is sufficient.
    connection_only = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers=set(),
        manufacturer="manufacturer",
        model="model",
    )
    # An identifier alone is sufficient as well.
    identifier_only = registry.async_get_or_create(
        config_entry_id="1234",
        connections=set(),
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 2
    assert connection_only
    assert identifier_only
    # Providing neither descriptor must raise.
    with pytest.raises(RequiredParameterMissing) as exc_info:
        registry.async_get_or_create(
            config_entry_id="1234",
            connections=set(),
            identifiers=set(),
            manufacturer="manufacturer",
            model="model",
        )
    assert exc_info.value.parameter_names == ["identifiers", "connections"]
async def test_multiple_config_entries(registry):
    """Make sure one device can be tracked by multiple config entries."""

    def _get_or_create(config_entry_id):
        # Fresh sets per call so nothing is shared between registry calls.
        return registry.async_get_or_create(
            config_entry_id=config_entry_id,
            connections={
                (device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")
            },
            identifiers={("bridgeid", "0123")},
            manufacturer="manufacturer",
            model="model",
        )

    first = _get_or_create("123")
    second = _get_or_create("456")
    third = _get_or_create("123")
    # All three lookups resolve to a single device owned by both entries.
    assert len(registry.devices) == 1
    assert first.id == second.id
    assert first.id == third.id
    assert second.config_entries == {"123", "456"}
@pytest.mark.parametrize("load_registries", [False])
async def test_loading_from_storage(hass, hass_storage):
    """Test loading stored devices on start."""
    # Seed the store with one active and one deleted device in the
    # current storage format.
    hass_storage[device_registry.STORAGE_KEY] = {
        "version": device_registry.STORAGE_VERSION_MAJOR,
        "minor_version": device_registry.STORAGE_VERSION_MINOR,
        "data": {
            "devices": [
                {
                    "area_id": "12345A",
                    "config_entries": ["1234"],
                    "configuration_url": None,
                    "connections": [["Zigbee", "01.23.45.67.89"]],
                    "disabled_by": device_registry.DeviceEntryDisabler.USER,
                    "entry_type": device_registry.DeviceEntryType.SERVICE,
                    "id": "abcdefghijklm",
                    "identifiers": [["serial", "12:34:56:AB:CD:EF"]],
                    "manufacturer": "manufacturer",
                    "model": "model",
                    "name_by_user": "Test Friendly Name",
                    "name": "name",
                    "sw_version": "version",
                    "hw_version": "hw_version",
                    "via_device_id": None,
                }
            ],
            "deleted_devices": [
                {
                    "config_entries": ["1234"],
                    "connections": [["Zigbee", "23.45.67.89.01"]],
                    "id": "bcdefghijklmn",
                    "identifiers": [["serial", "34:56:AB:CD:EF:12"]],
                    "orphaned_timestamp": None,
                }
            ],
        },
    }
    await device_registry.async_load(hass)
    registry = device_registry.async_get(hass)
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 1
    # Re-creating the stored device returns the persisted attributes with
    # list-valued fields restored as sets and enum fields as enum members.
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("Zigbee", "01.23.45.67.89")},
        identifiers={("serial", "12:34:56:AB:CD:EF")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == "abcdefghijklm"
    assert entry.area_id == "12345A"
    assert entry.name_by_user == "Test Friendly Name"
    assert entry.hw_version == "hw_version"
    assert entry.entry_type is device_registry.DeviceEntryType.SERVICE
    assert entry.disabled_by is device_registry.DeviceEntryDisabler.USER
    assert isinstance(entry.config_entries, set)
    assert isinstance(entry.connections, set)
    assert isinstance(entry.identifiers, set)
    # Re-creating the deleted device revives it with its original id.
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("Zigbee", "23.45.67.89.01")},
        identifiers={("serial", "34:56:AB:CD:EF:12")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == "bcdefghijklmn"
    assert isinstance(entry.config_entries, set)
    assert isinstance(entry.connections, set)
    assert isinstance(entry.identifiers, set)
@pytest.mark.parametrize("load_registries", [False])
async def test_migration_1_1_to_1_3(hass, hass_storage):
    """Test migration from version 1.1 to 1.3."""
    # Seed storage in the 1.1 format: no area_id/disabled_by/hw_version
    # fields yet, entry_type stored as a raw string, and deleted devices
    # still carrying full attributes.
    hass_storage[device_registry.STORAGE_KEY] = {
        "version": 1,
        "minor_version": 1,
        "data": {
            "devices": [
                {
                    "config_entries": ["1234"],
                    "connections": [["Zigbee", "01.23.45.67.89"]],
                    "entry_type": "service",
                    "id": "abcdefghijklm",
                    "identifiers": [["serial", "12:34:56:AB:CD:EF"]],
                    "manufacturer": "manufacturer",
                    "model": "model",
                    "name": "name",
                    "sw_version": "version",
                },
                # Invalid entry type
                {
                    "config_entries": [None],
                    "connections": [],
                    "entry_type": "INVALID_VALUE",
                    "id": "invalid-entry-type",
                    "identifiers": [["serial", "mock-id-invalid-entry"]],
                    "manufacturer": None,
                    "model": None,
                    "name": None,
                    "sw_version": None,
                },
            ],
            "deleted_devices": [
                {
                    "config_entries": ["123456"],
                    "connections": [],
                    "entry_type": "service",
                    "id": "deletedid",
                    "identifiers": [["serial", "12:34:56:AB:CD:FF"]],
                    "manufacturer": "manufacturer",
                    "model": "model",
                    "name": "name",
                    "sw_version": "version",
                }
            ],
        },
    }
    await device_registry.async_load(hass)
    registry = device_registry.async_get(hass)
    # Test data was loaded
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("Zigbee", "01.23.45.67.89")},
        identifiers={("serial", "12:34:56:AB:CD:EF")},
    )
    assert entry.id == "abcdefghijklm"
    # Update to trigger a store
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("Zigbee", "01.23.45.67.89")},
        identifiers={("serial", "12:34:56:AB:CD:EF")},
        sw_version="new_version",
    )
    assert entry.id == "abcdefghijklm"
    # Check we store migrated data
    await flush_store(registry._store)
    # Migration adds the new fields with None defaults, replaces the
    # unrecognized entry_type with None, and reduces deleted devices to
    # the minimal field set (including orphaned_timestamp).
    assert hass_storage[device_registry.STORAGE_KEY] == {
        "version": device_registry.STORAGE_VERSION_MAJOR,
        "minor_version": device_registry.STORAGE_VERSION_MINOR,
        "key": device_registry.STORAGE_KEY,
        "data": {
            "devices": [
                {
                    "area_id": None,
                    "config_entries": ["1234"],
                    "configuration_url": None,
                    "connections": [["Zigbee", "01.23.45.67.89"]],
                    "disabled_by": None,
                    "entry_type": "service",
                    "id": "abcdefghijklm",
                    "identifiers": [["serial", "12:34:56:AB:CD:EF"]],
                    "manufacturer": "manufacturer",
                    "model": "model",
                    "name": "name",
                    "name_by_user": None,
                    "sw_version": "new_version",
                    "hw_version": None,
                    "via_device_id": None,
                },
                {
                    "area_id": None,
                    "config_entries": [None],
                    "configuration_url": None,
                    "connections": [],
                    "disabled_by": None,
                    "entry_type": None,
                    "id": "invalid-entry-type",
                    "identifiers": [["serial", "mock-id-invalid-entry"]],
                    "manufacturer": None,
                    "model": None,
                    "name_by_user": None,
                    "name": None,
                    "sw_version": None,
                    "hw_version": None,
                    "via_device_id": None,
                },
            ],
            "deleted_devices": [
                {
                    "config_entries": ["123456"],
                    "connections": [],
                    "id": "deletedid",
                    "identifiers": [["serial", "12:34:56:AB:CD:FF"]],
                    "orphaned_timestamp": None,
                }
            ],
        },
    }
@pytest.mark.parametrize("load_registries", [False])
async def test_migration_1_2_to_1_3(hass, hass_storage):
    """Test migration from version 1.2 to 1.3."""
    # Seed storage in the 1.2 format: same as current except devices lack
    # the hw_version field... here it is present as None, so migration
    # only needs to bump the minor version.
    hass_storage[device_registry.STORAGE_KEY] = {
        "version": 1,
        "minor_version": 2,
        "key": device_registry.STORAGE_KEY,
        "data": {
            "devices": [
                {
                    "area_id": None,
                    "config_entries": ["1234"],
                    "configuration_url": None,
                    "connections": [["Zigbee", "01.23.45.67.89"]],
                    "disabled_by": None,
                    "entry_type": "service",
                    "id": "abcdefghijklm",
                    "identifiers": [["serial", "12:34:56:AB:CD:EF"]],
                    "manufacturer": "manufacturer",
                    "model": "model",
                    "name": "name",
                    "name_by_user": None,
                    "sw_version": "new_version",
                    "hw_version": None,
                    "via_device_id": None,
                },
                {
                    "area_id": None,
                    "config_entries": [None],
                    "configuration_url": None,
                    "connections": [],
                    "disabled_by": None,
                    "entry_type": None,
                    "id": "invalid-entry-type",
                    "identifiers": [["serial", "mock-id-invalid-entry"]],
                    "manufacturer": None,
                    "model": None,
                    "name_by_user": None,
                    "name": None,
                    "sw_version": None,
                    "hw_version": None,
                    "via_device_id": None,
                },
            ],
            "deleted_devices": [],
        },
    }
    await device_registry.async_load(hass)
    registry = device_registry.async_get(hass)
    # Test data was loaded
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("Zigbee", "01.23.45.67.89")},
        identifiers={("serial", "12:34:56:AB:CD:EF")},
    )
    assert entry.id == "abcdefghijklm"
    # Update to trigger a store
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("Zigbee", "01.23.45.67.89")},
        identifiers={("serial", "12:34:56:AB:CD:EF")},
        hw_version="new_version",
    )
    assert entry.id == "abcdefghijklm"
    # Check we store migrated data
    await flush_store(registry._store)
    assert hass_storage[device_registry.STORAGE_KEY] == {
        "version": device_registry.STORAGE_VERSION_MAJOR,
        "minor_version": device_registry.STORAGE_VERSION_MINOR,
        "key": device_registry.STORAGE_KEY,
        "data": {
            "devices": [
                {
                    "area_id": None,
                    "config_entries": ["1234"],
                    "configuration_url": None,
                    "connections": [["Zigbee", "01.23.45.67.89"]],
                    "disabled_by": None,
                    "entry_type": "service",
                    "id": "abcdefghijklm",
                    "identifiers": [["serial", "12:34:56:AB:CD:EF"]],
                    "manufacturer": "manufacturer",
                    "model": "model",
                    "name": "name",
                    "name_by_user": None,
                    "sw_version": "new_version",
                    "hw_version": "new_version",
                    "via_device_id": None,
                },
                {
                    "area_id": None,
                    "config_entries": [None],
                    "configuration_url": None,
                    "connections": [],
                    "disabled_by": None,
                    "entry_type": None,
                    "id": "invalid-entry-type",
                    "identifiers": [["serial", "mock-id-invalid-entry"]],
                    "manufacturer": None,
                    "model": None,
                    "name_by_user": None,
                    "name": None,
                    "sw_version": None,
                    "hw_version": None,
                    "via_device_id": None,
                },
            ],
            "deleted_devices": [],
        },
    }
async def test_removing_config_entries(hass, registry, update_events):
    """Make sure we do not get duplicate entries."""
    # Two config entries ("123" and "456") share the first device.
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    entry2 = registry.async_get_or_create(
        config_entry_id="456",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    # A second device owned only by config entry "123".
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
        identifiers={("bridgeid", "4567")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 2
    assert entry.id == entry2.id
    assert entry.id != entry3.id
    assert entry2.config_entries == {"123", "456"}
    # Clearing entry "123" keeps the shared device (now owned only by
    # "456") and removes the device that had no other owner.
    registry.async_clear_config_entry("123")
    entry = registry.async_get_device({("bridgeid", "0123")})
    entry3_removed = registry.async_get_device({("bridgeid", "4567")})
    assert entry.config_entries == {"456"}
    assert entry3_removed is None
    await hass.async_block_till_done()
    # Event sequence: create, update (entry "456" added), create,
    # update ("123" removed from the shared device), remove.
    assert len(update_events) == 5
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry2.id
    assert update_events[1]["changes"] == {"config_entries": {"123"}}
    assert update_events[2]["action"] == "create"
    assert update_events[2]["device_id"] == entry3.id
    assert "changes" not in update_events[2]
    assert update_events[3]["action"] == "update"
    assert update_events[3]["device_id"] == entry.id
    assert update_events[3]["changes"] == {"config_entries": {"456", "123"}}
    assert update_events[4]["action"] == "remove"
    assert update_events[4]["device_id"] == entry3.id
    assert "changes" not in update_events[4]
async def test_deleted_device_removing_config_entries(hass, registry, update_events):
    """Test deleted devices being cleaned up when config entries go away.

    Removed devices keep their id while any config entry still references
    them; once orphaned past the keep window they are purged and a re-add
    gets a fresh id.
    """
    # Two config entries share the first device; entry3 is a second device.
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    entry2 = registry.async_get_or_create(
        config_entry_id="456",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
        identifiers={("bridgeid", "4567")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 2
    assert len(registry.deleted_devices) == 0
    assert entry.id == entry2.id
    assert entry.id != entry3.id
    assert entry2.config_entries == {"123", "456"}
    registry.async_remove_device(entry.id)
    registry.async_remove_device(entry3.id)
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 2
    await hass.async_block_till_done()
    assert len(update_events) == 5
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry2.id
    assert update_events[1]["changes"] == {"config_entries": {"123"}}
    assert update_events[2]["action"] == "create"
    assert update_events[2]["device_id"] == entry3.id
    # Fixed: assert on the event dict, not the device-id string. The old
    # check ("changes" not in update_events[2]["device_id"]) only tested
    # that the id string does not contain the substring "changes".
    assert "changes" not in update_events[2]
    assert update_events[3]["action"] == "remove"
    assert update_events[3]["device_id"] == entry.id
    assert "changes" not in update_events[3]
    assert update_events[4]["action"] == "remove"
    assert update_events[4]["device_id"] == entry3.id
    assert "changes" not in update_events[4]
    # Clearing config entries does not drop already-deleted devices...
    registry.async_clear_config_entry("123")
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 2
    registry.async_clear_config_entry("456")
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 2
    # No event when a deleted device is purged
    await hass.async_block_till_done()
    assert len(update_events) == 5
    # Re-add, expect to keep the device id
    entry2 = registry.async_get_or_create(
        config_entry_id="456",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == entry2.id
    # ...but once the orphan-keep window expires the deleted entry is purged.
    future_time = time.time() + device_registry.ORPHANED_DEVICE_KEEP_SECONDS + 1
    with patch("time.time", return_value=future_time):
        registry.async_purge_expired_orphaned_devices()
    # Re-add, expect to get a new device id after the purge
    entry4 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry3.id != entry4.id
async def test_removing_area_id(registry):
    """Make sure clearing an area id detaches it from devices."""
    device = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    with_area = registry.async_update_device(device.id, area_id="12345A")
    # Removing the area must strip it from every device referencing it.
    registry.async_clear_area_id("12345A")
    without_area = registry.async_get_device({("bridgeid", "0123")})
    assert not without_area.area_id
    assert with_area != without_area
async def test_deleted_device_removing_area_id(registry):
    """Make sure we can clear the area id of a deleted device."""
    device = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    with_area = registry.async_update_device(device.id, area_id="12345A")
    # Clear the area while the device sits in the deleted list.
    registry.async_remove_device(device.id)
    registry.async_clear_area_id("12345A")
    # Restoring keeps the original id but not the cleared area.
    restored = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert device.id == restored.id
    without_area = registry.async_get_device({("bridgeid", "0123")})
    assert not without_area.area_id
    assert with_area != without_area
async def test_specifying_via_device_create(registry):
    """Test specifying a via_device and removal of the hub device."""
    hub = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "0123")},
        manufacturer="manufacturer",
        model="via",
    )
    light = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    assert light.via_device_id == hub.id
    # Removing the hub clears the reference on dependent devices.
    registry.async_remove_device(hub.id)
    light = registry.async_get_device({("hue", "456")})
    assert light.via_device_id is None
async def test_specifying_via_device_update(registry):
    """Test specifying a via_device and updating."""
    # The referenced hub does not exist yet, so no link is recorded.
    lamp = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    assert lamp.via_device_id is None
    hub = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "0123")},
        manufacturer="manufacturer",
        model="via",
    )
    # Re-creating the lamp after the hub exists resolves the link.
    lamp = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    assert lamp.via_device_id == hub.id
async def test_loading_saving_data(hass, registry, area_registry):
    """Test that we load/save data correctly.

    Builds a registry with active and deleted devices, persists it, loads
    it into a second registry instance, and checks both registries agree
    (including enum-typed fields and ordering). Fix: renamed the
    misspelled local `orig_kitchen_light_witout_suggested_area` to
    `orig_kitchen_light_without_suggested_area`.
    """
    orig_via = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "0123")},
        manufacturer="manufacturer",
        model="via",
        name="Original Name",
        sw_version="Orig SW 1",
        entry_type=None,
    )
    orig_light = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "456")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
        disabled_by=device_registry.DeviceEntryDisabler.USER,
    )
    orig_light2 = registry.async_get_or_create(
        config_entry_id="456",
        connections=set(),
        identifiers={("hue", "789")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
    )
    registry.async_remove_device(orig_light2.id)
    orig_light3 = registry.async_get_or_create(
        config_entry_id="789",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:AB:CD:EF:12")},
        identifiers={("hue", "abc")},
        manufacturer="manufacturer",
        model="light",
    )
    registry.async_get_or_create(
        config_entry_id="abc",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:AB:CD:EF:12")},
        identifiers={("abc", "123")},
        manufacturer="manufacturer",
        model="light",
    )
    registry.async_remove_device(orig_light3.id)
    # Re-creating light3 revives it with its original id.
    orig_light4 = registry.async_get_or_create(
        config_entry_id="789",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:AB:CD:EF:12")},
        identifiers={("hue", "abc")},
        manufacturer="manufacturer",
        model="light",
        entry_type=device_registry.DeviceEntryType.SERVICE,
    )
    assert orig_light4.id == orig_light3.id
    orig_kitchen_light = registry.async_get_or_create(
        config_entry_id="999",
        connections=set(),
        identifiers={("hue", "999")},
        manufacturer="manufacturer",
        model="light",
        via_device=("hue", "0123"),
        disabled_by=device_registry.DeviceEntryDisabler.USER,
        suggested_area="Kitchen",
    )
    assert len(registry.devices) == 4
    assert len(registry.deleted_devices) == 1
    orig_via = registry.async_update_device(
        orig_via.id, area_id="mock-area-id", name_by_user="mock-name-by-user"
    )
    # Now load written data in new registry
    registry2 = device_registry.DeviceRegistry(hass)
    await flush_store(registry._store)
    await registry2.async_load()
    # Ensure same order
    assert list(registry.devices) == list(registry2.devices)
    assert list(registry.deleted_devices) == list(registry2.deleted_devices)
    new_via = registry2.async_get_device({("hue", "0123")})
    new_light = registry2.async_get_device({("hue", "456")})
    new_light4 = registry2.async_get_device({("hue", "abc")})
    assert orig_via == new_via
    assert orig_light == new_light
    assert orig_light4 == new_light4
    # Ensure enums converted
    for (old, new) in (
        (orig_via, new_via),
        (orig_light, new_light),
        (orig_light4, new_light4),
    ):
        assert old.disabled_by is new.disabled_by
        assert old.entry_type is new.entry_type
    # Ensure a save/load cycle does not keep suggested area
    new_kitchen_light = registry2.async_get_device({("hue", "999")})
    assert orig_kitchen_light.suggested_area == "Kitchen"
    orig_kitchen_light_without_suggested_area = registry.async_update_device(
        orig_kitchen_light.id, suggested_area=None
    )
    assert orig_kitchen_light_without_suggested_area.suggested_area is None
    assert orig_kitchen_light_without_suggested_area == new_kitchen_light
async def test_no_unnecessary_changes(registry):
    """A no-op lookup must not schedule a registry save."""
    original = registry.async_get_or_create(
        config_entry_id="1234",
        connections={("ethernet", "12:34:56:78:90:AB:CD:EF")},
        identifiers={("hue", "456"), ("bla", "123")},
    )
    # Looking the device up again with a subset of its identifiers
    # changes nothing, so no save should be scheduled.
    with patch(
        "homeassistant.helpers.device_registry.DeviceRegistry.async_schedule_save"
    ) as mock_save:
        relookup = registry.async_get_or_create(
            config_entry_id="1234", identifiers={("hue", "456")}
        )
    assert original.id == relookup.id
    assert len(mock_save.mock_calls) == 0
async def test_format_mac(registry):
    """Make sure we normalize mac addresses."""
    canonical = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    # Every common spelling normalizes to the same lowercase-colon form.
    for mac in ("123456ABCDEF", "123456abcdef", "12:34:56:ab:cd:ef", "1234.56ab.cdef"):
        same_device = registry.async_get_or_create(
            config_entry_id="1234",
            connections={(device_registry.CONNECTION_NETWORK_MAC, mac)},
        )
        assert same_device.id == canonical.id, mac
        assert same_device.connections == {
            (device_registry.CONNECTION_NETWORK_MAC, "12:34:56:ab:cd:ef")
        }
    # Strings that do not look like MACs are stored untouched; none raise.
    invalid_macs = (
        "invalid_mac",
        "123456ABCDEFG",  # 1 extra char
        "12:34:56:ab:cdef",  # not enough :
        "12:34:56:ab:cd:e:f",  # too many :
        "1234.56abcdef",  # not enough .
        "123.456.abc.def",  # too many .
    )
    for invalid in invalid_macs:
        invalid_mac_entry = registry.async_get_or_create(
            config_entry_id="1234",
            connections={(device_registry.CONNECTION_NETWORK_MAC, invalid)},
        )
        assert next(iter(invalid_mac_entry.connections))[1] == invalid
async def test_update(hass, registry, update_events):
    """Verify that we can update some attributes of a device."""
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("hue", "456"), ("bla", "123")},
    )
    new_identifiers = {("hue", "654"), ("bla", "321")}
    assert not entry.area_id
    assert not entry.name_by_user
    # One update call covering several fields should schedule exactly one
    # save.
    with patch.object(registry, "async_schedule_save") as mock_save:
        updated_entry = registry.async_update_device(
            entry.id,
            area_id="12345A",
            manufacturer="Test Producer",
            model="Test Model",
            name_by_user="Test Friendly Name",
            new_identifiers=new_identifiers,
            via_device_id="98765B",
            disabled_by=device_registry.DeviceEntryDisabler.USER,
        )
    assert mock_save.call_count == 1
    assert updated_entry != entry
    assert updated_entry.area_id == "12345A"
    assert updated_entry.manufacturer == "Test Producer"
    assert updated_entry.model == "Test Model"
    assert updated_entry.name_by_user == "Test Friendly Name"
    assert updated_entry.identifiers == new_identifiers
    assert updated_entry.via_device_id == "98765B"
    assert updated_entry.disabled_by is device_registry.DeviceEntryDisabler.USER
    # The old identifiers no longer resolve; the new ones (and the
    # untouched connection) do.
    assert registry.async_get_device({("hue", "456")}) is None
    assert registry.async_get_device({("bla", "123")}) is None
    assert registry.async_get_device({("hue", "654")}) == updated_entry
    assert registry.async_get_device({("bla", "321")}) == updated_entry
    assert (
        registry.async_get_device(
            {}, {(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}
        )
        == updated_entry
    )
    assert registry.async_get(updated_entry.id) is not None
    await hass.async_block_till_done()
    assert len(update_events) == 2
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry.id
    # The changes payload records each updated field's previous value.
    assert update_events[1]["changes"] == {
        "area_id": None,
        "disabled_by": None,
        "identifiers": {("bla", "123"), ("hue", "456")},
        "manufacturer": None,
        "model": None,
        "name_by_user": None,
        "via_device_id": None,
    }
async def test_update_remove_config_entries(hass, registry, update_events):
    """Make sure we do not get duplicate entries."""
    # Two config entries share the first device; entry3 is owned only by
    # config entry "123".
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    entry2 = registry.async_get_or_create(
        config_entry_id="456",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
        identifiers={("bridgeid", "4567")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 2
    assert entry.id == entry2.id
    assert entry.id != entry3.id
    assert entry2.config_entries == {"123", "456"}
    # Removing "123" from the shared device leaves it owned by "456";
    # removing it from entry3 (its only owner) deletes the device, which
    # async_update_device signals by returning None.
    updated_entry = registry.async_update_device(
        entry2.id, remove_config_entry_id="123"
    )
    removed_entry = registry.async_update_device(
        entry3.id, remove_config_entry_id="123"
    )
    assert updated_entry.config_entries == {"456"}
    assert removed_entry is None
    removed_entry = registry.async_get_device({("bridgeid", "4567")})
    assert removed_entry is None
    await hass.async_block_till_done()
    # Event sequence: create, update, create, update, remove.
    assert len(update_events) == 5
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry2.id
    assert update_events[1]["changes"] == {"config_entries": {"123"}}
    assert update_events[2]["action"] == "create"
    assert update_events[2]["device_id"] == entry3.id
    assert "changes" not in update_events[2]
    assert update_events[3]["action"] == "update"
    assert update_events[3]["device_id"] == entry.id
    assert update_events[3]["changes"] == {"config_entries": {"456", "123"}}
    assert update_events[4]["action"] == "remove"
    assert update_events[4]["device_id"] == entry3.id
    assert "changes" not in update_events[4]
async def test_update_sw_version(hass, registry, update_events):
    """Verify that we can update software version of a device."""
    device = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bla", "123")},
    )
    assert not device.sw_version
    new_version = "0x20020263"

    # Exactly one save must be scheduled for the update.
    with patch.object(registry, "async_schedule_save") as mock_save:
        updated = registry.async_update_device(device.id, sw_version=new_version)
    assert mock_save.call_count == 1

    assert updated != device
    assert updated.sw_version == new_version

    await hass.async_block_till_done()

    # One create event followed by one update event carrying the old value.
    assert len(update_events) == 2
    create_event, update_event = update_events
    assert create_event["action"] == "create"
    assert create_event["device_id"] == device.id
    assert "changes" not in create_event
    assert update_event["action"] == "update"
    assert update_event["device_id"] == device.id
    assert update_event["changes"] == {"sw_version": None}
async def test_update_hw_version(hass, registry, update_events):
    """Verify that we can update hardware version of a device."""
    device = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bla", "123")},
    )
    assert not device.hw_version
    new_version = "0x20020263"

    # Exactly one save must be scheduled for the update.
    with patch.object(registry, "async_schedule_save") as mock_save:
        updated = registry.async_update_device(device.id, hw_version=new_version)
    assert mock_save.call_count == 1

    assert updated != device
    assert updated.hw_version == new_version

    await hass.async_block_till_done()

    # One create event followed by one update event carrying the old value.
    assert len(update_events) == 2
    create_event, update_event = update_events
    assert create_event["action"] == "create"
    assert create_event["device_id"] == device.id
    assert "changes" not in create_event
    assert update_event["action"] == "update"
    assert update_event["device_id"] == device.id
    assert update_event["changes"] == {"hw_version": None}
async def test_update_suggested_area(hass, registry, area_registry, update_events):
    """Verify that we can update the suggested area version of a device."""
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bla", "123")},
    )
    assert not entry.suggested_area
    assert entry.area_id is None
    suggested_area = "Pool"
    # Exactly one save must be scheduled for the update.
    with patch.object(registry, "async_schedule_save") as mock_save:
        updated_entry = registry.async_update_device(
            entry.id, suggested_area=suggested_area
        )
    assert mock_save.call_count == 1
    assert updated_entry != entry
    assert updated_entry.suggested_area == suggested_area
    # Setting the suggested area created the area in the area registry and
    # assigned the device to it.
    pool_area = area_registry.async_get_area_by_name("Pool")
    assert pool_area is not None
    assert updated_entry.area_id == pool_area.id
    assert len(area_registry.areas) == 1
    await hass.async_block_till_done()
    # Expected event stream: create, then one update with the old (empty)
    # values of the changed fields.
    assert len(update_events) == 2
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry.id
    assert update_events[1]["changes"] == {"area_id": None, "suggested_area": None}
async def test_cleanup_device_registry(hass, registry):
    """Test cleanup works."""
    config_entry = MockConfigEntry(domain="hue")
    config_entry.add_to_hass(hass)

    # Three hue devices plus one orphan pointing at a non-existing config entry.
    hue_devices = [
        registry.async_get_or_create(
            identifiers={("hue", f"d{num}")}, config_entry_id=config_entry.entry_id
        )
        for num in (1, 2, 3)
    ]
    registry.async_get_or_create(
        identifiers={("something", "d4")}, config_entry_id="non_existing"
    )

    ent_reg = entity_registry.async_get(hass)
    ent_reg.async_get_or_create("light", "hue", "e1", device_id=hue_devices[0].id)
    ent_reg.async_get_or_create("light", "hue", "e2", device_id=hue_devices[0].id)
    ent_reg.async_get_or_create("light", "hue", "e3", device_id=hue_devices[2].id)

    device_registry.async_cleanup(hass, registry, ent_reg)

    # Devices tied to a valid config entry survive; the orphan is removed.
    assert registry.async_get_device({("hue", "d1")}) is not None
    assert registry.async_get_device({("hue", "d2")}) is not None
    assert registry.async_get_device({("hue", "d3")}) is not None
    assert registry.async_get_device({("something", "d4")}) is None
async def test_cleanup_device_registry_removes_expired_orphaned_devices(hass, registry):
    """Test cleanup removes expired orphaned devices."""
    config_entry = MockConfigEntry(domain="hue")
    config_entry.add_to_hass(hass)
    registry.async_get_or_create(
        identifiers={("hue", "d1")}, config_entry_id=config_entry.entry_id
    )
    registry.async_get_or_create(
        identifiers={("hue", "d2")}, config_entry_id=config_entry.entry_id
    )
    registry.async_get_or_create(
        identifiers={("hue", "d3")}, config_entry_id=config_entry.entry_id
    )
    # Detaching the config entry moves all three devices to the deleted list.
    registry.async_clear_config_entry(config_entry.entry_id)
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 3
    ent_reg = entity_registry.async_get(hass)
    # Cleanup before the retention window elapses keeps the deleted devices.
    device_registry.async_cleanup(hass, registry, ent_reg)
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 3
    # Jump past ORPHANED_DEVICE_KEEP_SECONDS so the orphans count as expired
    # and get purged on the next cleanup.
    future_time = time.time() + device_registry.ORPHANED_DEVICE_KEEP_SECONDS + 1
    with patch("time.time", return_value=future_time):
        device_registry.async_cleanup(hass, registry, ent_reg)
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 0
async def test_cleanup_startup(hass):
    """Test we run a cleanup on startup."""
    hass.state = CoreState.not_running

    with patch(
        "homeassistant.helpers.device_registry.Debouncer.async_call"
    ) as mock_call:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        # The started event must schedule exactly one debounced cleanup.
        assert len(mock_call.mock_calls) == 1
@pytest.mark.parametrize("load_registries", [False])
async def test_cleanup_entity_registry_change(hass):
    """Test we run a cleanup when entity registry changes.

    Don't pre-load the registries as the debouncer will then not be waiting for
    EVENT_ENTITY_REGISTRY_UPDATED events.
    """
    await device_registry.async_load(hass)
    await entity_registry.async_load(hass)
    ent_reg = entity_registry.async_get(hass)
    with patch(
        "homeassistant.helpers.device_registry.Debouncer.async_call"
    ) as mock_call:
        # Creating an entity without a device does not schedule a cleanup.
        entity = ent_reg.async_get_or_create("light", "hue", "e1")
        await hass.async_block_till_done()
        assert len(mock_call.mock_calls) == 0
        # Normal update does not trigger
        ent_reg.async_update_entity(entity.entity_id, name="updated")
        await hass.async_block_till_done()
        assert len(mock_call.mock_calls) == 0
        # Device ID update triggers
        ent_reg.async_get_or_create("light", "hue", "e1", device_id="bla")
        await hass.async_block_till_done()
        assert len(mock_call.mock_calls) == 1
        # Removal also triggers
        ent_reg.async_remove(entity.entity_id)
        await hass.async_block_till_done()
        assert len(mock_call.mock_calls) == 2
async def test_restore_device(hass, registry, update_events):
    """Make sure device id is stable."""
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 0
    # Removal parks the device on the deleted list rather than dropping it.
    registry.async_remove_device(entry.id)
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 1
    # A different device gets a fresh id...
    entry2 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
        identifiers={("bridgeid", "4567")},
        manufacturer="manufacturer",
        model="model",
    )
    # ...while re-creating the removed device restores its original id.
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == entry3.id
    assert entry.id != entry2.id
    assert len(registry.devices) == 2
    assert len(registry.deleted_devices) == 0
    # The restored entry must expose real sets, not serialized lists.
    assert isinstance(entry3.config_entries, set)
    assert isinstance(entry3.connections, set)
    assert isinstance(entry3.identifiers, set)
    await hass.async_block_till_done()
    # Expected event stream: create, remove, create (entry2), create (restore).
    assert len(update_events) == 4
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "remove"
    assert update_events[1]["device_id"] == entry.id
    assert "changes" not in update_events[1]
    assert update_events[2]["action"] == "create"
    assert update_events[2]["device_id"] == entry2.id
    assert "changes" not in update_events[2]
    assert update_events[3]["action"] == "create"
    assert update_events[3]["device_id"] == entry3.id
    assert "changes" not in update_events[3]
async def test_restore_simple_device(hass, registry, update_events):
    """Make sure device id is stable."""
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
    )
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 0
    # Removal parks the device on the deleted list rather than dropping it.
    registry.async_remove_device(entry.id)
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 1
    # A different device gets a fresh id...
    entry2 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:78:CD:EF:12")},
        identifiers={("bridgeid", "4567")},
    )
    # ...while re-creating the removed device restores its original id.
    entry3 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
    )
    assert entry.id == entry3.id
    assert entry.id != entry2.id
    assert len(registry.devices) == 2
    assert len(registry.deleted_devices) == 0
    await hass.async_block_till_done()
    # Expected event stream: create, remove, create (entry2), create (restore).
    assert len(update_events) == 4
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "remove"
    assert update_events[1]["device_id"] == entry.id
    assert "changes" not in update_events[1]
    assert update_events[2]["action"] == "create"
    assert update_events[2]["device_id"] == entry2.id
    assert "changes" not in update_events[2]
    assert update_events[3]["action"] == "create"
    assert update_events[3]["device_id"] == entry3.id
    assert "changes" not in update_events[3]
async def test_restore_shared_device(hass, registry, update_events):
    """Make sure device id is stable for shared devices."""
    entry = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("entry_123", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 0
    # A second config entry attaching via the same MAC shares the one device.
    registry.async_get_or_create(
        config_entry_id="234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("entry_234", "2345")},
        manufacturer="manufacturer",
        model="model",
    )
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 0
    registry.async_remove_device(entry.id)
    assert len(registry.devices) == 0
    assert len(registry.deleted_devices) == 1
    # Restoring through the first config entry keeps the original device id.
    entry2 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("entry_123", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == entry2.id
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 0
    # The restored entry must expose real sets, not serialized lists.
    assert isinstance(entry2.config_entries, set)
    assert isinstance(entry2.connections, set)
    assert isinstance(entry2.identifiers, set)
    registry.async_remove_device(entry.id)
    # Restoring through the second config entry also keeps the original id.
    entry3 = registry.async_get_or_create(
        config_entry_id="234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("entry_234", "2345")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == entry3.id
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 0
    assert isinstance(entry3.config_entries, set)
    assert isinstance(entry3.connections, set)
    assert isinstance(entry3.identifiers, set)
    # Re-attaching the first config entry merges back into the same device.
    entry4 = registry.async_get_or_create(
        config_entry_id="123",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("entry_123", "0123")},
        manufacturer="manufacturer",
        model="model",
    )
    assert entry.id == entry4.id
    assert len(registry.devices) == 1
    assert len(registry.deleted_devices) == 0
    assert isinstance(entry4.config_entries, set)
    assert isinstance(entry4.connections, set)
    assert isinstance(entry4.identifiers, set)
    await hass.async_block_till_done()
    # Expected event stream: create, update (second entry attaches), remove,
    # create (restore via 123), remove, create (restore via 234), update
    # (entry 123 re-attaches). Update events carry the old field values.
    assert len(update_events) == 7
    assert update_events[0]["action"] == "create"
    assert update_events[0]["device_id"] == entry.id
    assert "changes" not in update_events[0]
    assert update_events[1]["action"] == "update"
    assert update_events[1]["device_id"] == entry.id
    assert update_events[1]["changes"] == {
        "config_entries": {"123"},
        "identifiers": {("entry_123", "0123")},
    }
    assert update_events[2]["action"] == "remove"
    assert update_events[2]["device_id"] == entry.id
    assert "changes" not in update_events[2]
    assert update_events[3]["action"] == "create"
    assert update_events[3]["device_id"] == entry.id
    assert "changes" not in update_events[3]
    assert update_events[4]["action"] == "remove"
    assert update_events[4]["device_id"] == entry.id
    assert "changes" not in update_events[4]
    assert update_events[5]["action"] == "create"
    assert update_events[5]["device_id"] == entry.id
    assert "changes" not in update_events[5]
    assert update_events[6]["action"] == "update"
    assert update_events[6]["device_id"] == entry.id
    assert update_events[6]["changes"] == {
        "config_entries": {"234"},
        "identifiers": {("entry_234", "2345")},
    }
async def test_get_or_create_empty_then_set_default_values(hass, registry):
    """Test creating an entry, then setting default name, model, manufacturer."""

    def _info(device):
        # Bundle the descriptive fields for compact comparisons.
        return (device.name, device.model, device.manufacturer)

    entry = registry.async_get_or_create(
        identifiers={("bridgeid", "0123")}, config_entry_id="1234"
    )
    assert _info(entry) == (None, None, None)

    # The first defaults provided fill in the empty fields...
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        identifiers={("bridgeid", "0123")},
        default_name="default name 1",
        default_model="default model 1",
        default_manufacturer="default manufacturer 1",
    )
    assert _info(entry) == (
        "default name 1",
        "default model 1",
        "default manufacturer 1",
    )

    # ...while later defaults never overwrite values that are already set.
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        identifiers={("bridgeid", "0123")},
        default_name="default name 2",
        default_model="default model 2",
        default_manufacturer="default manufacturer 2",
    )
    assert _info(entry) == (
        "default name 1",
        "default model 1",
        "default manufacturer 1",
    )
async def test_get_or_create_empty_then_update(hass, registry):
    """Test creating an entry, then setting name, model, manufacturer."""

    def _info(device):
        # Bundle the descriptive fields for compact comparisons.
        return (device.name, device.model, device.manufacturer)

    entry = registry.async_get_or_create(
        identifiers={("bridgeid", "0123")}, config_entry_id="1234"
    )
    assert _info(entry) == (None, None, None)

    # Explicit values fill the empty fields...
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        identifiers={("bridgeid", "0123")},
        name="name 1",
        model="model 1",
        manufacturer="manufacturer 1",
    )
    assert _info(entry) == ("name 1", "model 1", "manufacturer 1")

    # ...and defaults never replace explicitly set values.
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        identifiers={("bridgeid", "0123")},
        default_name="default name 1",
        default_model="default model 1",
        default_manufacturer="default manufacturer 1",
    )
    assert _info(entry) == ("name 1", "model 1", "manufacturer 1")
async def test_get_or_create_sets_default_values(hass, registry):
    """Test creating an entry, then setting default name, model, manufacturer."""
    entry = registry.async_get_or_create(
        config_entry_id="1234",
        identifiers={("bridgeid", "0123")},
        default_name="default name 1",
        default_model="default model 1",
        default_manufacturer="default manufacturer 1",
    )
    # Defaults apply on creation since the fields start out empty.
    assert (entry.name, entry.model, entry.manufacturer) == (
        "default name 1",
        "default model 1",
        "default manufacturer 1",
    )

    entry = registry.async_get_or_create(
        config_entry_id="1234",
        identifiers={("bridgeid", "0123")},
        default_name="default name 2",
        default_model="default model 2",
        default_manufacturer="default manufacturer 2",
    )
    # A second round of defaults must not overwrite the stored values.
    assert (entry.name, entry.model, entry.manufacturer) == (
        "default name 1",
        "default model 1",
        "default manufacturer 1",
    )
async def test_verify_suggested_area_does_not_overwrite_area_id(
    hass, registry, area_registry
):
    """Make sure suggested area does not override a set area id."""
    game_room_area = area_registry.async_create("Game Room")
    original_entry = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        sw_version="sw-version",
        name="name",
        manufacturer="manufacturer",
        model="model",
    )
    # Explicitly assign the device to the Game Room area.
    entry = registry.async_update_device(original_entry.id, area_id=game_room_area.id)
    assert entry.area_id == game_room_area.id
    # Re-registering the same device with a suggested_area must keep the
    # explicitly assigned area.
    entry2 = registry.async_get_or_create(
        config_entry_id="1234",
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
        identifiers={("bridgeid", "0123")},
        sw_version="sw-version",
        name="name",
        manufacturer="manufacturer",
        model="model",
        suggested_area="New Game Room",
    )
    assert entry2.area_id == game_room_area.id
async def test_disable_config_entry_disables_devices(hass, registry):
    """Test that we disable entities tied to a config entry."""
    config_entry = MockConfigEntry(domain="light")
    config_entry.add_to_hass(hass)
    entry1 = registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entry2 = registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "34:56:AB:CD:EF:12")},
        disabled_by=device_registry.DeviceEntryDisabler.USER,
    )
    assert not entry1.disabled
    assert entry2.disabled
    # Disabling the config entry disables its devices...
    await hass.config_entries.async_set_disabled_by(
        config_entry.entry_id, config_entries.ConfigEntryDisabler.USER
    )
    await hass.async_block_till_done()
    entry1 = registry.async_get(entry1.id)
    assert entry1.disabled
    assert entry1.disabled_by is device_registry.DeviceEntryDisabler.CONFIG_ENTRY
    # ...but a device already disabled by the user keeps USER as the reason.
    entry2 = registry.async_get(entry2.id)
    assert entry2.disabled
    assert entry2.disabled_by is device_registry.DeviceEntryDisabler.USER
    # Re-enabling the config entry only re-enables the device it had disabled.
    await hass.config_entries.async_set_disabled_by(config_entry.entry_id, None)
    await hass.async_block_till_done()
    entry1 = registry.async_get(entry1.id)
    assert not entry1.disabled
    entry2 = registry.async_get(entry2.id)
    assert entry2.disabled
    assert entry2.disabled_by is device_registry.DeviceEntryDisabler.USER
async def test_only_disable_device_if_all_config_entries_are_disabled(hass, registry):
    """Test that we only disable device if all related config entries are disabled."""
    config_entry1 = MockConfigEntry(domain="light")
    config_entry1.add_to_hass(hass)
    config_entry2 = MockConfigEntry(domain="light")
    config_entry2.add_to_hass(hass)
    # The same MAC registered from both config entries -> one shared device.
    registry.async_get_or_create(
        config_entry_id=config_entry1.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entry1 = registry.async_get_or_create(
        config_entry_id=config_entry2.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    assert len(entry1.config_entries) == 2
    assert not entry1.disabled
    # Disabling only one of the two config entries leaves the device enabled.
    await hass.config_entries.async_set_disabled_by(
        config_entry1.entry_id, config_entries.ConfigEntryDisabler.USER
    )
    await hass.async_block_till_done()
    entry1 = registry.async_get(entry1.id)
    assert not entry1.disabled
    # Once the second entry is disabled as well, the device becomes disabled.
    await hass.config_entries.async_set_disabled_by(
        config_entry2.entry_id, config_entries.ConfigEntryDisabler.USER
    )
    await hass.async_block_till_done()
    entry1 = registry.async_get(entry1.id)
    assert entry1.disabled
    assert entry1.disabled_by is device_registry.DeviceEntryDisabler.CONFIG_ENTRY
    # Re-enabling either config entry re-enables the device.
    await hass.config_entries.async_set_disabled_by(config_entry1.entry_id, None)
    await hass.async_block_till_done()
    entry1 = registry.async_get(entry1.id)
    assert not entry1.disabled
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from tempfile import mkdtemp
from shutil import rmtree
import numpy as np
import nibabel as nb
from nipype.testing import (assert_equal, assert_not_equal,
assert_raises, skipif)
import nipype.interfaces.fsl.utils as fsl
from nipype.interfaces.fsl import no_fsl
def create_files_in_directory():
    """Create two small 4D NIfTI files in a fresh temp dir and cd into it.

    Returns the list of file names, the temp directory, and the previous
    working directory (so callers can clean up via ``clean_directory``).
    """
    outdir = mkdtemp()
    cwd = os.getcwd()
    os.chdir(outdir)
    filelist = ['a.nii', 'b.nii']
    shape = (3, 3, 3, 4)
    for fname in filelist:
        hdr = nb.Nifti1Header()
        hdr.set_data_shape(shape)
        data = np.random.random(shape)
        nb.save(nb.Nifti1Image(data, np.eye(4), hdr),
                os.path.join(outdir, fname))
    return filelist, outdir, cwd
def clean_directory(outdir, old_wd):
    """Remove the scratch directory and restore the working directory.

    Parameters
    ----------
    outdir : str
        Directory to delete (silently skipped when it no longer exists).
    old_wd : str
        Working directory to restore.
    """
    # Restore the working directory first: the current directory usually *is*
    # outdir (create_files_in_directory chdirs into it), and removing the
    # current working directory fails on some platforms (e.g. Windows).
    os.chdir(old_wd)
    if os.path.exists(outdir):
        rmtree(outdir)
@skipif(no_fsl)
def test_extractroi():
    """Check the trait metadata wired into fsl.ExtractROI's inputs."""
    expected_traits = dict(
        args=dict(argstr='%s'),
        environ=dict(),
        in_file=dict(mandatory=True, argstr='%s'),
        output_type=dict(),
        roi_file=dict(argstr='%s'),
        t_min=dict(argstr='%d'),
        t_size=dict(argstr='%d'),
        x_min=dict(argstr='%d'),
        x_size=dict(argstr='%d'),
        y_min=dict(argstr='%d'),
        y_size=dict(argstr='%d'),
        z_min=dict(argstr='%d'),
        z_size=dict(argstr='%d'),
    )
    instance = fsl.ExtractROI()
    for trait_name, spec in expected_traits.items():
        for meta_name, expected in spec.items():
            yield assert_equal, getattr(instance.inputs.traits()[trait_name], meta_name), expected
@skipif(no_fsl)
def test_imagemaths():
    """Check the trait metadata wired into fsl.ImageMaths's inputs."""
    expected_traits = dict(
        args=dict(argstr='%s'),
        environ=dict(),
        in_file=dict(argstr='%s', mandatory=True),
        in_file2=dict(argstr='%s'),
        op_string=dict(argstr='%s'),
        out_data_type=dict(argstr='-odt %s'),
        out_file=dict(argstr='%s'),
        output_type=dict(),
        suffix=dict(),
    )
    instance = fsl.ImageMaths()
    for trait_name, spec in expected_traits.items():
        for meta_name, expected in spec.items():
            yield assert_equal, getattr(instance.inputs.traits()[trait_name], meta_name), expected
@skipif(no_fsl)
def test_merge():
    """Check the trait metadata wired into fsl.Merge's inputs."""
    expected_traits = dict(
        args=dict(argstr='%s'),
        dimension=dict(argstr='-%s', mandatory=True),
        environ=dict(),
        in_files=dict(mandatory=True, argstr='%s'),
        merged_file=dict(argstr='%s'),
        output_type=dict(),
    )
    instance = fsl.Merge()
    for trait_name, spec in expected_traits.items():
        for meta_name, expected in spec.items():
            yield assert_equal, getattr(instance.inputs.traits()[trait_name], meta_name), expected
@skipif(no_fsl)
def test_filterregressor():
    """Check the trait metadata wired into fsl.FilterRegressor's inputs."""
    expected_traits = dict(
        Out_vnscales=dict(),
        args=dict(argstr='%s'),
        design_file=dict(mandatory=True),
        environ=dict(),
        filter_columns=dict(mandatory=True),
        filter_all=dict(mandatory=True),
        in_file=dict(mandatory=True),
        mask=dict(),
        out_file=dict(),
        output_type=dict(),
        var_norm=dict(),
    )
    instance = fsl.FilterRegressor()
    for trait_name, spec in expected_traits.items():
        for meta_name, expected in spec.items():
            yield assert_equal, getattr(instance.inputs.traits()[trait_name], meta_name), expected
@skipif(no_fsl)
def test_smooth():
    """Check the trait metadata wired into fsl.Smooth's inputs."""
    expected_traits = dict(
        args=dict(argstr='%s'),
        environ=dict(),
        fwhm=dict(argstr='-kernel gauss %f -fmean', mandatory=True),
        in_file=dict(argstr='%s', mandatory=True),
        output_type=dict(),
        smoothed_file=dict(argstr='%s'),
    )
    instance = fsl.Smooth()
    for trait_name, spec in expected_traits.items():
        for meta_name, expected in spec.items():
            yield assert_equal, getattr(instance.inputs.traits()[trait_name], meta_name), expected
@skipif(no_fsl)
def test_split():
    """Check the trait metadata wired into fsl.Split's inputs."""
    expected_traits = dict(
        args=dict(argstr='%s'),
        dimension=dict(argstr='-%s'),
        environ=dict(),
        in_file=dict(argstr='%s'),
        out_base_name=dict(argstr='%s'),
        output_type=dict(),
    )
    instance = fsl.Split()
    for trait_name, spec in expected_traits.items():
        for meta_name, expected in spec.items():
            yield assert_equal, getattr(instance.inputs.traits()[trait_name], meta_name), expected
def no_fsl():
    """Return True when FSL is NOT installed.

    Used with ``skipif`` to skip tests that would fail without FSL.

    NOTE(review): this shadows the ``no_fsl`` imported from
    ``nipype.interfaces.fsl`` above, so only decorators evaluated after this
    definition use this version — consider dropping one of the two.
    """
    # Identity comparison with None per PEP 8; version() is None when the
    # FSL installation cannot be located.
    return fsl.Info().version() is None
@skipif(no_fsl)
def test_fslroi():
    """Exercise fsl.ExtractROI (fslroi) command-line assembly."""
    filelist, outdir, cwd = create_files_in_directory()
    roi = fsl.ExtractROI()
    # make sure command gets called
    yield assert_equal, roi.cmd, 'fslroi'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, roi.run
    # .inputs based parameters setting
    roi.inputs.in_file = filelist[0]
    roi.inputs.roi_file = 'foo_roi.nii'
    roi.inputs.t_min = 10
    roi.inputs.t_size = 20
    # Only the time-axis min/size are set, so just "10 20" is appended.
    yield assert_equal, roi.cmdline, 'fslroi %s foo_roi.nii 10 20'%filelist[0]
    # .run based parameter setting
    roi2 = fsl.ExtractROI(in_file=filelist[0],
                          roi_file='foo2_roi.nii',
                          t_min=20, t_size=40,
                          x_min=3, x_size=30,
                          y_min=40, y_size=10,
                          z_min=5, z_size=20)
    # With all axes given, x/y/z ranges come first and the t range goes last.
    yield assert_equal, roi2.cmdline, \
        'fslroi %s foo2_roi.nii 3 30 40 10 5 20 20 40'%filelist[0]
    clean_directory(outdir, cwd)
    # test arguments for opt_map
    # Fslroi class doesn't have a filled opt_map{}
# test fslmath
@skipif(no_fsl)
def test_fslmaths():
    """Exercise fsl.ImageMaths (fslmaths) command-line assembly."""
    filelist, outdir, cwd = create_files_in_directory()
    math = fsl.ImageMaths()
    # make sure command gets called
    yield assert_equal, math.cmd, 'fslmaths'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, math.run
    # .inputs based parameters setting
    math.inputs.in_file = filelist[0]
    math.inputs.op_string = '-add 2.5 -mul input_volume2'
    math.inputs.out_file = 'foo_math.nii'
    # The op_string is inserted verbatim between in_file and out_file.
    yield assert_equal, math.cmdline, \
        'fslmaths %s -add 2.5 -mul input_volume2 foo_math.nii'%filelist[0]
    # .run based parameter setting
    math2 = fsl.ImageMaths(in_file=filelist[0], op_string='-add 2.5',
                           out_file='foo2_math.nii')
    yield assert_equal, math2.cmdline, 'fslmaths %s -add 2.5 foo2_math.nii'%filelist[0]
    # test arguments for opt_map
    # Fslmath class doesn't have opt_map{}
    clean_directory(outdir, cwd)
# test overlay
@skipif(no_fsl)
def test_overlay():
    """Exercise fsl.Overlay command-line assembly."""
    filelist, outdir, cwd = create_files_in_directory()
    overlay = fsl.Overlay()
    # make sure command gets called
    yield assert_equal, overlay.cmd, 'overlay'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, overlay.run
    # .inputs based parameters setting
    overlay.inputs.stat_image = filelist[0]
    overlay.inputs.stat_thresh = (2.5, 10)
    overlay.inputs.background_image = filelist[1]
    overlay.inputs.auto_thresh_bg = True
    overlay.inputs.show_negative_stats = True
    overlay.inputs.out_file = 'foo_overlay.nii'
    # show_negative_stats repeats the stat image with the negated range.
    yield assert_equal, overlay.cmdline, \
        'overlay 1 0 %s -a %s 2.50 10.00 %s -2.50 -10.00 foo_overlay.nii'%(
            filelist[1],filelist[0],filelist[0])
    # .run based parameter setting
    overlay2 = fsl.Overlay(stat_image=filelist[0], stat_thresh=(2.5,10),
                           background_image=filelist[1], auto_thresh_bg=True,
                           out_file='foo2_overlay.nii')
    yield assert_equal, overlay2.cmdline, 'overlay 1 0 %s -a %s 2.50 10.00 foo2_overlay.nii'%(
        filelist[1], filelist[0])
    clean_directory(outdir, cwd)
# test slicer
@skipif(no_fsl)
def test_slicer():
    """Exercise fsl.Slicer command-line assembly."""
    filelist, outdir, cwd = create_files_in_directory()
    slicer = fsl.Slicer()
    # make sure command gets called
    yield assert_equal, slicer.cmd, 'slicer'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, slicer.run
    # .inputs based parameters setting
    slicer.inputs.in_file = filelist[0]
    slicer.inputs.image_edges = filelist[1]
    slicer.inputs.intensity_range = (10., 20.)
    slicer.inputs.all_axial = True
    slicer.inputs.image_width = 750
    slicer.inputs.out_file = 'foo_bar.png'
    # -L (slice labels) appears here by default; compare with slicer2 below
    # where label_slices=False removes it.
    yield assert_equal, slicer.cmdline, \
        'slicer %s %s -L -i 10.000 20.000 -A 750 foo_bar.png'%(filelist[0],filelist[1])
    # .run based parameter setting
    slicer2 = fsl.Slicer(in_file = filelist[0], middle_slices = True, label_slices=False,
                         out_file='foo_bar2.png')
    yield assert_equal, slicer2.cmdline, 'slicer %s -a foo_bar2.png'%(filelist[0])
    clean_directory(outdir, cwd)
def create_parfiles():
    """Write two 6x3 random motion-parameter files into the current dir."""
    parfiles = ['a.par', 'b.par']
    for fname in parfiles:
        np.savetxt(fname, np.random.rand(6, 3))
    return parfiles
# test fsl_tsplot
@skipif(no_fsl)
def test_plottimeseries():
    """Exercise fsl.PlotTimeSeries (fsl_tsplot) command-line assembly."""
    filelist, outdir, cwd = create_files_in_directory()
    parfiles = create_parfiles()
    plotter = fsl.PlotTimeSeries()
    # make sure command gets called
    yield assert_equal, plotter.cmd, 'fsl_tsplot'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, plotter.run
    # .inputs based parameters setting
    plotter.inputs.in_file = parfiles[0]
    plotter.inputs.labels = ['x','y','z']
    plotter.inputs.y_range = (0,1)
    plotter.inputs.title = 'test plot'
    plotter.inputs.out_file = 'foo.png'
    # Labels join with commas; y_range expands into --ymin/--ymax.
    yield assert_equal, plotter.cmdline, \
        ('fsl_tsplot -i %s -a x,y,z -o foo.png -t \'test plot\' -u 1 --ymin=0 --ymax=1'
         %parfiles[0])
    # .run based parameter setting
    plotter2 = fsl.PlotTimeSeries(in_file=parfiles, title='test2 plot', plot_range=(2,5),
                                  out_file='bar.png')
    # A list of input files joins with commas; plot_range maps to
    # --start/--finish.
    yield assert_equal, plotter2.cmdline, \
        'fsl_tsplot -i %s,%s -o bar.png --start=2 --finish=5 -t \'test2 plot\' -u 1'%tuple(parfiles)
    clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_plotmotionparams():
    """Exercise fsl.PlotMotionParams (fsl_tsplot) command-line assembly."""
    filelist, outdir, cwd = create_files_in_directory()
    parfiles = create_parfiles()
    plotter = fsl.PlotMotionParams()
    # make sure command gets called
    yield assert_equal, plotter.cmd, 'fsl_tsplot'
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, plotter.run
    # .inputs based parameters setting
    plotter.inputs.in_file = parfiles[0]
    plotter.inputs.in_source = 'fsl'
    plotter.inputs.plot_type = 'rotations'
    plotter.inputs.out_file = 'foo.png'
    # fsl/mcflirt source + 'rotations' selects columns 1-3 and the
    # radians title.
    yield assert_equal, plotter.cmdline, \
        ('fsl_tsplot -i %s -o foo.png -t \'MCFLIRT estimated rotations (radians)\' '
         '--start=1 --finish=3 -a x,y,z'%parfiles[0])
    # .run based parameter setting
    plotter2 = fsl.PlotMotionParams(in_file=parfiles[1],in_source='spm',plot_type='translations',
                                    out_file='bar.png')
    # spm source + 'translations' also uses columns 1-3 but the mm title.
    yield assert_equal, plotter2.cmdline, \
        ('fsl_tsplot -i %s -o bar.png -t \'Realign estimated translations (mm)\' '
         '--start=1 --finish=3 -a x,y,z'%parfiles[1])
    clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_convertxfm():
    """Exercise fsl.ConvertXFM (convert_xfm) command-line assembly."""
    filelist, outdir, cwd = create_files_in_directory()
    cvt = fsl.ConvertXFM()
    # make sure command gets called
    yield assert_equal, cvt.cmd, "convert_xfm"
    # test raising error with mandatory args absent
    yield assert_raises, ValueError, cvt.run
    # .inputs based parameters setting
    cvt.inputs.in_file = filelist[0]
    cvt.inputs.invert_xfm = True
    cvt.inputs.out_file = "foo.mat"
    yield assert_equal, cvt.cmdline, 'convert_xfm -omat foo.mat -inverse %s'%filelist[0]
    # constructor based parameter setting
    cvt2 = fsl.ConvertXFM(in_file=filelist[0], in_file2=filelist[1], concat_xfm=True,
                          out_file="bar.mat")
    # Note the argument order: -concat takes the second file before the first.
    yield assert_equal, cvt2.cmdline, \
        "convert_xfm -omat bar.mat -concat %s %s"%(filelist[1], filelist[0])
    clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_swapdims():
    """Exercise fsl.SwapDimensions (fslswapdim) command-line assembly."""
    files, testdir, origdir = create_files_in_directory()
    swap = fsl.SwapDimensions()
    # Test the underlying command
    yield assert_equal, swap.cmd, "fslswapdim"
    # Test mandatory args
    args = [dict(in_file=files[0]), dict(new_dims=("x","y","z"))]
    for arg in args:
        # Each dict alone misses one mandatory input, so run() must raise.
        wontrun = fsl.SwapDimensions(**arg)
        yield assert_raises, ValueError, wontrun.run
    # Now test a basic command line
    swap.inputs.in_file = files[0]
    swap.inputs.new_dims = ("x", "y", "z")
    # The default output name derives from the input with a _newdims suffix.
    yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z %s"%os.path.realpath(os.path.join(testdir, "a_newdims.nii"))
    # Test that we can set an output name
    swap.inputs.out_file = "b.nii"
    yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z b.nii"
    # Clean up
    clean_directory(testdir, origdir)
| |
"""The tests for the webdav calendar component."""
import datetime
from caldav.objects import Event
import pytest
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from tests.async_mock import MagicMock, Mock, patch
# pylint: disable=redefined-outer-name
# Device metadata for the mocked private calendar.
# NOTE(review): not referenced by any test visible here — confirm it is used
# elsewhere before removing.
DEVICE_DATA = {"name": "Private Calendar", "device_id": "Private Calendar"}

# Raw VCALENDAR payloads returned by the mocked calendar's date_search().
# Each entry exercises one scheduling variant; tests reference them by UID.
EVENTS = [
    # UID 1: plain timed event, 2017-11-27 17:00-18:00 UTC.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:1
DTSTAMP:20171125T000000Z
DTSTART:20171127T170000Z
DTEND:20171127T180000Z
SUMMARY:This is a normal event
LOCATION:Hamburg
DESCRIPTION:Surprisingly rainy
END:VEVENT
END:VCALENDAR
""",
    # UID 2: summary carries the "!!-02:00" offset marker parsed by the
    # component (see test_ongoing_event_with_offset).
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Dynamics.//CalDAV Client//EN
BEGIN:VEVENT
UID:2
DTSTAMP:20171125T000000Z
DTSTART:20171127T100000Z
DTEND:20171127T110000Z
SUMMARY:This is an offset event !!-02:00
LOCATION:Hamburg
DESCRIPTION:Surprisingly shiny
END:VEVENT
END:VCALENDAR
""",
    # UID 3: all-day event (date-only DTSTART/DTEND).
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:3
DTSTAMP:20171125T000000Z
DTSTART:20171127
DTEND:20171128
SUMMARY:This is an all day event
LOCATION:Hamburg
DESCRIPTION:What a beautiful day
END:VEVENT
END:VCALENDAR
""",
    # UID 4: neither DTEND nor DURATION supplied.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:4
DTSTAMP:20171125T000000Z
DTSTART:20171127
SUMMARY:This is an event without dtend or duration
LOCATION:Hamburg
DESCRIPTION:What an endless day
END:VEVENT
END:VCALENDAR
""",
    # UID 5: date-only DTSTART plus DURATION.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:5
DTSTAMP:20171125T000000Z
DTSTART:20171127
DURATION:PT1H
SUMMARY:This is an event with duration
LOCATION:Hamburg
DESCRIPTION:What a day
END:VEVENT
END:VCALENDAR
""",
    # UID 6: timed DTSTART plus DURATION.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:6
DTSTAMP:20171125T000000Z
DTSTART:20171127T100000Z
DURATION:PT1H
SUMMARY:This is an event with duration
LOCATION:Hamburg
DESCRIPTION:What a day
END:VEVENT
END:VCALENDAR
""",
    # UID 7: explicit TZID (America/Los_Angeles) on DTSTART/DTEND.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:7
DTSTART;TZID=America/Los_Angeles:20171127T083000
DTSTAMP:20180301T020053Z
DTEND;TZID=America/Los_Angeles:20171127T093000
SUMMARY:Enjoy the sun
LOCATION:San Francisco
DESCRIPTION:Sunny day
END:VEVENT
END:VCALENDAR
""",
    # UID 8: floating event — no timezone on DTSTART/DTEND.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:8
DTSTART:20171127T190000
DTEND:20171127T200000
SUMMARY:This is a floating Event
LOCATION:Hamburg
DESCRIPTION:What a day
END:VEVENT
END:VCALENDAR
""",
    # UID 9: daily recurrence with an UNTIL bound.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:9
DTSTAMP:20171125T000000Z
DTSTART:20171027T220000Z
DTEND:20171027T223000Z
SUMMARY:This is a recurring event
LOCATION:Hamburg
DESCRIPTION:Every day for a while
RRULE:FREQ=DAILY;UNTIL=20171227T215959
END:VEVENT
END:VCALENDAR
""",
    # UID 10: daily recurrence using DURATION instead of DTEND.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:10
DTSTAMP:20171125T000000Z
DTSTART:20171027T230000Z
DURATION:PT30M
SUMMARY:This is a recurring event with a duration
LOCATION:Hamburg
DESCRIPTION:Every day for a while as well
RRULE:FREQ=DAILY;UNTIL=20171227T215959
END:VEVENT
END:VCALENDAR
""",
    # UID 11: daily recurrence whose UNTIL already passed for the test dates.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:11
DTSTAMP:20171125T000000Z
DTSTART:20171027T233000Z
DTEND:20171027T235959Z
SUMMARY:This is a recurring event that has ended
LOCATION:Hamburg
DESCRIPTION:Every day for a while
RRULE:FREQ=DAILY;UNTIL=20171127T225959
END:VEVENT
END:VCALENDAR
""",
    # UID 12: unbounded daily recurrence.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//E-Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:12
DTSTAMP:20171125T000000Z
DTSTART:20171027T234500Z
DTEND:20171027T235959Z
SUMMARY:This is a recurring event that never ends
LOCATION:Hamburg
DESCRIPTION:Every day forever
RRULE:FREQ=DAILY
END:VEVENT
END:VCALENDAR
""",
    # UID 13: all-day daily recurrence, bounded by COUNT.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:13
DTSTAMP:20161125T000000Z
DTSTART:20161127
DTEND:20161128
SUMMARY:This is a recurring all day event
LOCATION:Hamburg
DESCRIPTION:Groundhog Day
RRULE:FREQ=DAILY;COUNT=100
END:VEVENT
END:VCALENDAR
""",
    # UID 14: hourly recurrence, twelve 30-minute occurrences.
    """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Global Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:14
DTSTAMP:20151125T000000Z
DTSTART:20151127T000000Z
DTEND:20151127T003000Z
SUMMARY:This is an hourly recurring event
LOCATION:Hamburg
DESCRIPTION:The bell tolls for thee
RRULE:FREQ=HOURLY;INTERVAL=1;COUNT=12
END:VEVENT
END:VCALENDAR
""",
]

# Minimal caldav platform configuration used by every test; individual tests
# copy it and add "calendars" / "custom_calendars" keys.
CALDAV_CONFIG = {
    "platform": "caldav",
    "url": "http://test.local",
    "custom_calendars": [],
}
@pytest.fixture(autouse=True)
def mock_http(hass):
    """Mock the http component."""
    # Autouse: every test gets a stubbed hass.http so no real HTTP setup runs.
    hass.http = Mock()
@pytest.fixture
def mock_dav_client():
    """Mock the dav client."""
    # Replace caldav.DAVClient with a stub exposing two calendars.
    with patch(
        "caldav.DAVClient", return_value=_mocked_dav_client("First", "Second")
    ) as dav_client:
        yield dav_client
@pytest.fixture(name="calendar")
def mock_private_cal():
    """Mock a private calendar."""
    private_calendar = _mock_calendar("Private")
    dav_client = _mocked_dav_client(calendars=[private_calendar])
    # Any DAVClient constructed by the component sees only this calendar.
    with patch("caldav.DAVClient", return_value=dav_client):
        yield private_calendar
def _local_datetime(hours, minutes):
    """Build a datetime object for testing in the correct timezone."""
    # All timed test events fall on 2017-11-27; only the clock time varies.
    naive = datetime.datetime(2017, 11, 27, hours, minutes, 0)
    return dt.as_local(naive)
def _mocked_dav_client(*names, calendars=None):
"""Mock requests.get invocations."""
if calendars is None:
calendars = [_mock_calendar(name) for name in names]
principal = Mock()
principal.calendars = MagicMock(return_value=calendars)
client = Mock()
client.principal = MagicMock(return_value=principal)
return client
def _mock_calendar(name):
    """Mock a calendar named *name* whose date_search returns all EVENTS."""
    # One caldav Event per raw VCALENDAR payload; href and id derive from the
    # list index.  (Comprehension replaces the manual append loop; docstring
    # added for consistency with the other helpers in this module.)
    events = [
        Event(None, "%d.ics" % idx, event, None, str(idx))
        for idx, event in enumerate(EVENTS)
    ]
    calendar = Mock()
    calendar.date_search = MagicMock(return_value=events)
    calendar.name = name
    return calendar
async def test_setup_component(hass, mock_dav_client):
    """Test setup component with calendars."""
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    # Both mocked calendars become entities named after themselves.
    for entity_id, expected_name in (
        ("calendar.first", "First"),
        ("calendar.second", "Second"),
    ):
        assert hass.states.get(entity_id).name == expected_name
async def test_setup_component_with_no_calendar_matching(hass, mock_dav_client):
    """Test setup component with wrong calendar."""
    config = dict(CALDAV_CONFIG)
    config["calendars"] = ["none"]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    # No configured name matched a mocked calendar, so no entities exist.
    assert not hass.states.async_entity_ids("calendar")
async def test_setup_component_with_a_calendar_match(hass, mock_dav_client):
    """Test setup component with right calendar."""
    config = dict(CALDAV_CONFIG)
    config["calendars"] = ["Second"]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    # Only the matching calendar ("Second") becomes an entity.
    all_calendar_states = hass.states.async_entity_ids("calendar")
    assert len(all_calendar_states) == 1
    state = hass.states.get("calendar.second")
    assert state.name == "Second"
async def test_setup_component_with_one_custom_calendar(hass, mock_dav_client):
    """Test setup component with custom calendars."""
    config = dict(CALDAV_CONFIG)
    config["custom_calendars"] = [
        {"name": "HomeOffice", "calendar": "Second", "search": "HomeOffice"}
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    # A custom calendar replaces the plain entities; its entity id combines
    # the source calendar and the custom name, but it displays the custom name.
    all_calendar_states = hass.states.async_entity_ids("calendar")
    assert len(all_calendar_states) == 1
    state = hass.states.get("calendar.second_homeoffice")
    assert state.name == "HomeOffice"
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 45))
async def test_ongoing_event(mock_now, hass, calendar):
    """Test that the ongoing event is returned."""
    # Now = 17:45; event UID 1 runs 17:00-18:00, so the calendar is "on".
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a normal event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 17:00:00",
        "end_time": "2017-11-27 18:00:00",
        "location": "Hamburg",
        "description": "Surprisingly rainy",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 30))
async def test_just_ended_event(mock_now, hass, calendar):
    """Test that the next ongoing event is returned."""
    # Now = 17:30; earlier events have finished and UID 1 (17:00-18:00) is
    # the currently ongoing one.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a normal event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 17:00:00",
        "end_time": "2017-11-27 18:00:00",
        "location": "Hamburg",
        "description": "Surprisingly rainy",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 00))
async def test_ongoing_event_different_tz(mock_now, hass, calendar):
    """Test that the ongoing event with another timezone is returned."""
    # Event UID 7 is 08:30-09:30 America/Los_Angeles; the expected attribute
    # times (16:30-17:30) are that span converted to the test's local zone.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "Enjoy the sun",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 16:30:00",
        "description": "Sunny day",
        "end_time": "2017-11-27 17:30:00",
        "location": "San Francisco",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(19, 10))
async def test_ongoing_floating_event_returned(mock_now, hass, calendar):
    """Test that floating events without timezones work."""
    # Event UID 8 (19:00-20:00) has no timezone; it is interpreted in the
    # local zone, so at 19:10 it is ongoing.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    # Leftover debug print() calls removed — they only polluted test output.
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a floating Event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 19:00:00",
        "end_time": "2017-11-27 20:00:00",
        "location": "Hamburg",
        "description": "What a day",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(8, 30))
async def test_ongoing_event_with_offset(mock_now, hass, calendar):
    """Test that the offset is taken into account."""
    # Event UID 2's summary ends in "!!-02:00"; the expected attributes show
    # the marker stripped from "message" and offset_reached True at 08:30,
    # while the state stays off because the event itself starts at 10:00.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is an offset event",
        "all_day": False,
        "offset_reached": True,
        "start_time": "2017-11-27 10:00:00",
        "end_time": "2017-11-27 11:00:00",
        "location": "Hamburg",
        "description": "Surprisingly shiny",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
async def test_matching_filter(mock_now, hass, calendar):
    """Test that the matching event is returned."""
    # The custom calendar's plain-text search matches UID 1; at 12:00 the
    # event (17:00-18:00) has not started, so the state is off but its
    # details are exposed as the upcoming event.
    config = dict(CALDAV_CONFIG)
    config["custom_calendars"] = [
        {"name": "Private", "calendar": "Private", "search": "This is a normal event"}
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private_private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a normal event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 17:00:00",
        "end_time": "2017-11-27 18:00:00",
        "location": "Hamburg",
        "description": "Surprisingly rainy",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
async def test_matching_filter_real_regexp(mock_now, hass, calendar):
    """Test that the event matching the regexp is returned."""
    # Same scenario as test_matching_filter, but the search term is a regular
    # expression matching UID 1's description-bearing summary text.
    config = dict(CALDAV_CONFIG)
    config["custom_calendars"] = [
        {"name": "Private", "calendar": "Private", "search": r".*rainy"}
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private_private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a normal event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 17:00:00",
        "end_time": "2017-11-27 18:00:00",
        "location": "Hamburg",
        "description": "Surprisingly rainy",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(20, 00))
async def test_filter_matching_past_event(mock_now, hass, calendar):
    """Test that the matching past event is not returned."""
    # At 20:00 the matched event (UID 1, 17:00-18:00) is over, so the
    # calendar entity is off.
    config = dict(CALDAV_CONFIG)
    config["custom_calendars"] = [
        {"name": "Private", "calendar": "Private", "search": "This is a normal event"}
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private_private")
    assert state.name == calendar.name
    # Use the STATE_OFF constant for consistency with the rest of this module
    # (was the bare string "off").
    assert state.state == STATE_OFF
@patch("homeassistant.util.dt.now", return_value=_local_datetime(12, 00))
async def test_no_result_with_filtering(mock_now, hass, calendar):
    """Test that nothing is returned since nothing matches."""
    config = dict(CALDAV_CONFIG)
    config["custom_calendars"] = [
        {
            "name": "Private",
            "calendar": "Private",
            "search": "This is a non-existing event",
        }
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private_private")
    assert state.name == calendar.name
    # Use the STATE_OFF constant for consistency with the rest of this module
    # (was the bare string "off").
    assert state.state == STATE_OFF
@patch("homeassistant.util.dt.now", return_value=_local_datetime(17, 30))
async def test_all_day_event_returned(mock_now, hass, calendar):
    """Test that the event lasting the whole day is returned."""
    # The ".*" search admits every event; the all-day event (UID 3) covers
    # the whole of 2017-11-27, so the entity is on at 17:30.
    config = dict(CALDAV_CONFIG)
    config["custom_calendars"] = [
        {"name": "Private", "calendar": "Private", "search": ".*"}
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private_private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is an all day event",
        "all_day": True,
        "offset_reached": False,
        "start_time": "2017-11-27 00:00:00",
        "end_time": "2017-11-28 00:00:00",
        "location": "Hamburg",
        "description": "What a beautiful day",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(21, 45))
async def test_event_rrule(mock_now, hass, calendar):
    """Test that the future recurring event is returned."""
    # Daily RRULE event UID 9 recurs 22:00-22:30; at 21:45 today's occurrence
    # is upcoming, so the state is off but its details are exposed.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a recurring event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 22:00:00",
        "end_time": "2017-11-27 22:30:00",
        "location": "Hamburg",
        "description": "Every day for a while",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(22, 15))
async def test_event_rrule_ongoing(mock_now, hass, calendar):
    """Test that the current recurring event is returned."""
    # At 22:15 today's occurrence of UID 9 (22:00-22:30) is in progress.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a recurring event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 22:00:00",
        "end_time": "2017-11-27 22:30:00",
        "location": "Hamburg",
        "description": "Every day for a while",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(22, 45))
async def test_event_rrule_duration(mock_now, hass, calendar):
    """Test that the future recurring event is returned."""
    # UID 10 recurs daily at 23:00 with DURATION:PT30M; at 22:45 today's
    # occurrence is upcoming.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a recurring event with a duration",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 23:00:00",
        "end_time": "2017-11-27 23:30:00",
        "location": "Hamburg",
        "description": "Every day for a while as well",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(23, 15))
async def test_event_rrule_duration_ongoing(mock_now, hass, calendar):
    """Test that the ongoing recurring event is returned."""
    # At 23:15 today's occurrence of UID 10 (23:00 + 30 min) is in progress.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a recurring event with a duration",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 23:00:00",
        "end_time": "2017-11-27 23:30:00",
        "location": "Hamburg",
        "description": "Every day for a while as well",
    }
@patch("homeassistant.util.dt.now", return_value=_local_datetime(23, 37))
async def test_event_rrule_endless(mock_now, hass, calendar):
    """Test that the endless recurring event is returned."""
    # UID 12 recurs daily 23:45-23:59:59 with no RRULE end; at 23:37 today's
    # occurrence is upcoming.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a recurring event that never ends",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2017-11-27 23:45:00",
        "end_time": "2017-11-27 23:59:59",
        "location": "Hamburg",
        "description": "Every day forever",
    }
@patch(
    "homeassistant.util.dt.now",
    return_value=dt.as_local(datetime.datetime(2016, 12, 1, 17, 30)),
)
async def test_event_rrule_all_day(mock_now, hass, calendar):
    """Test that the recurring all day event is returned."""
    # UID 13 is an all-day event recurring daily from 2016-11-27 (COUNT=100);
    # 2016-12-01 falls inside that window, so the entity is on all day.
    config = dict(CALDAV_CONFIG)
    config["custom_calendars"] = [
        {"name": "Private", "calendar": "Private", "search": ".*"}
    ]
    assert await async_setup_component(hass, "calendar", {"calendar": config})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private_private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is a recurring all day event",
        "all_day": True,
        "offset_reached": False,
        "start_time": "2016-12-01 00:00:00",
        "end_time": "2016-12-02 00:00:00",
        "location": "Hamburg",
        "description": "Groundhog Day",
    }
@patch(
    "homeassistant.util.dt.now",
    return_value=dt.as_local(datetime.datetime(2015, 11, 27, 0, 15)),
)
async def test_event_rrule_hourly_on_first(mock_now, hass, calendar):
    """Test that the hourly recurring event is on during its first occurrence."""
    # UID 14 recurs hourly (COUNT=12), each occurrence 30 minutes long;
    # 00:15 falls inside the first occurrence (00:00-00:30).
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is an hourly recurring event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2015-11-27 00:00:00",
        "end_time": "2015-11-27 00:30:00",
        "location": "Hamburg",
        "description": "The bell tolls for thee",
    }
@patch(
    "homeassistant.util.dt.now",
    return_value=dt.as_local(datetime.datetime(2015, 11, 27, 11, 15)),
)
async def test_event_rrule_hourly_on_last(mock_now, hass, calendar):
    """Test that the hourly recurring event is on during its last occurrence."""
    # 11:15 falls inside the twelfth and final occurrence (11:00-11:30).
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_ON
    assert dict(state.attributes) == {
        "friendly_name": "Private",
        "message": "This is an hourly recurring event",
        "all_day": False,
        "offset_reached": False,
        "start_time": "2015-11-27 11:00:00",
        "end_time": "2015-11-27 11:30:00",
        "location": "Hamburg",
        "description": "The bell tolls for thee",
    }
@patch(
    "homeassistant.util.dt.now",
    return_value=dt.as_local(datetime.datetime(2015, 11, 27, 0, 45)),
)
async def test_event_rrule_hourly_off_first(mock_now, hass, calendar):
    """Test that the hourly recurring event is off between occurrences."""
    # 00:45 is after the first occurrence ends (00:30) and before the next
    # starts (01:00).
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
@patch(
    "homeassistant.util.dt.now",
    return_value=dt.as_local(datetime.datetime(2015, 11, 27, 11, 45)),
)
async def test_event_rrule_hourly_off_last(mock_now, hass, calendar):
    """Test that the hourly recurring event is off after its last occurrence."""
    # 11:45 is after the final occurrence ends (11:30).
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
@patch(
    "homeassistant.util.dt.now",
    return_value=dt.as_local(datetime.datetime(2015, 11, 27, 12, 15)),
)
async def test_event_rrule_hourly_ended(mock_now, hass, calendar):
    """Test that the hourly recurring event is off once the series has ended."""
    # 12:15 is past every one of the twelve COUNT-bounded occurrences.
    assert await async_setup_component(hass, "calendar", {"calendar": CALDAV_CONFIG})
    await hass.async_block_till_done()
    state = hass.states.get("calendar.private")
    assert state.name == calendar.name
    assert state.state == STATE_OFF
| |
# This python script will run our simplification (fwd rev) algorithm multiple times
import sys
import os
import copy
import shutil
import fwdrev
import simpl
import pickle
import subprocess
def checkEnvironment():
    """Validate command-line arguments and the environment, or exit.

    Expects: argv[1] = times to execute, argv[2] = results dir,
    argv[3] = binary under test, argv[4:] = binary flags.
    NOTE: Python 2 script (print statements, raw_input elsewhere).
    """
    if len(sys.argv) < 4:
        # Trailing commas keep the usage text on continuous lines.
        print "usage: python multiexp.py [times to execute] [results dir]",
        print "[binary under test] [binary flags]"
        print
        print "generates an input schedule and then performs the ",
        print "iterative forward-reverse algorithm on this schedule",
        print "and saves the results"
        print
        print "Output files:"
        print "\tstart-sched: the buggy start schedule (pickled object)"
        print "\tsimpl-sched: the simplified schedule (pickled object)"
        print "\tstart-relaxed-sched"
        print "\tsimpl-relaxed-sched"
        print
        sys.exit(1)
    # NOTE(review): assert-based validation is stripped under python -O;
    # consider explicit checks with sys.exit if that matters here.
    assert os.environ.get('THRILLE_ROOT')!= None, \
            "Thrille root environment variable not defined"
    assert int(sys.argv[1]) > 0, "Nonsensical execution time"
    assert os.path.exists(sys.argv[2]), "Results directory does not exist"
    assert os.path.exists(sys.argv[3]), "binary does not exist"
def getNewErrorSchedule(tr, bin_file, bin_flags):
    """Run *bin_file* under the random-active scheduler until an erroneous
    schedule is recorded, then return the path to that schedule file.

    tr        -- THRILLE_ROOT directory
    bin_file  -- path to the binary under test
    bin_flags -- list of extra argv flags for the binary
    """
    liblockrace = os.path.join(tr, "bin/liblockrace.so")
    librandact = os.path.join(tr, "bin/librandact.so")
    assert os.path.exists(liblockrace)
    assert os.path.exists(librandact)
    # NOTE(review): `bin` shadows the builtin of the same name (harmless here
    # but worth renaming).
    binarydir, bin = os.path.split(bin_file)
    curr_dir = os.getcwd()
    # Run from the binary's own directory so relative artifacts land there.
    if binarydir != '':
        os.chdir(binarydir)
    bin = os.path.join(".", bin)
    # The random-active scheduler needs prior lock-race data to drive it.
    if "thrille-randomactive" not in os.listdir(os.getcwd()):
        assert False, "no lock race data to randomactive test"
    assert os.path.exists("thrille-randomactive")
    count = 0;
    while True:
        # Remove stale schedule outputs from a previous attempt.
        if os.path.exists("thrille-sched"):
            os.remove("thrille-sched")
        if os.path.exists("thrille-relaxed-sched"):
            os.remove("thrille-relaxed-sched")
        count += 1
        # Ask the operator before grinding forever without reproducing a bug.
        if count > 1000:
            raw_input("1000 iterations with no error--continue?")
            count = 0
        logout = open("randact.log", "w")
        # Inject the random-active scheduler into the target via LD_PRELOAD
        # for this one subprocess invocation only.
        os.environ["LD_PRELOAD"] = librandact
        args = [bin] + bin_flags
        subprocess.call(args, stdout=logout, stderr=logout)
        del os.environ["LD_PRELOAD"]
        logout.close()
        # A recorded schedule with a non-None error means the bug reproduced.
        s = fwdrev.Schedule("my-schedule")
        if s.error is not None:
            os.chdir(curr_dir)
            return os.path.join(binarydir, "my-schedule")
def main():
    """Drive repeated find-bug-then-simplify experiments and save results.

    For each of argv[1] iterations: reproduce an erroneous schedule, run the
    forward-reverse simplifier on it (up to 3 attempts), sanity-check the
    result, and persist pickled schedules plus human-readable summaries.
    """
    checkEnvironment()
    times_to_repeat = int(sys.argv[1])
    save_directory = sys.argv[2]
    binary_file = sys.argv[3]
    tr = os.environ.get('THRILLE_ROOT')
    fout = open(os.path.join(save_directory, "simpl-runstat"), "w")
    errout = open(os.path.join(save_directory, "error.log"), "w")
    # Snapshot the binary and every Thrille library used, so results can be
    # reproduced later against the exact same artifacts.
    my_bin_save = os.path.join(save_directory, "bin")
    os.mkdir(my_bin_save)
    tmppath, binname = os.path.split(binary_file)
    shutil.copy(binary_file, os.path.join(my_bin_save, binname))
    shutil.copy(os.path.join(tr, "bin", "libserializer.so"), \
            os.path.join(my_bin_save, "libserializer.so"))
    shutil.copy(os.path.join(tr, "bin", "libstrictserial.so"), \
            os.path.join(my_bin_save, "libstrictserial.so"))
    shutil.copy(os.path.join(tr, "bin", "librelaxedserial.so"), \
            os.path.join(my_bin_save, "librelaxedserial.so"))
    shutil.copy(os.path.join(tr, "bin", "librandomschedule.so"), \
            os.path.join(my_bin_save, "librandomschedule.so"))
    shutil.copy(os.path.join(tr, "bin", "librandact.so"), \
            os.path.join(my_bin_save, "librandact.so"))
    shutil.copy(os.path.join(tr, "bin", "librace.so"), \
            os.path.join(my_bin_save, "librace.so"))
    shutil.copy(os.path.join(tr, "bin", "liblockrace.so"), \
            os.path.join(my_bin_save, "liblockrace.so"))
    #figure out how to remove svn
    #os.mkdir(os.path.join(save_directory, "src"))
    #shutil.copytree(os.path.join(tr, "src"), \
    #        os.path.join(save_directory,"src","src")) \
    #shutil.copytree(os.path.join(tr, "scripts"), \
    #        os.path.join(save_directory,"src","scripts"))
    # Record the exact command line that produced this results directory.
    fout.write("Command that was run:\n")
    for x in sys.argv:
        fout.write(x + " ")
    fout.write("\n\n")
    #lists for tracking statistics
    start_list = []
    end_list = []
    i = 0
    while i < times_to_repeat:
        print "**EXPERIMENT", i
        # Zero-pad single-digit run directories so they sort lexically.
        my_save_dir = ""
        if (i < 10):
            my_save_dir = os.path.join(save_directory, "run0" + str(i))
        else:
            my_save_dir = os.path.join(save_directory, "run" + str(i))
        os.mkdir(my_save_dir)
        # Reproduce a buggy schedule and archive it before simplification.
        error_trace = getNewErrorSchedule(tr, binary_file, sys.argv[4:])
        start_trace = os.path.join(my_save_dir, "start-trace")
        shutil.copy(error_trace, start_trace)
        startsched = fwdrev.Schedule(error_trace)
        start_list.append(copy.deepcopy(startsched))
        start_save = os.path.join(my_save_dir, "start-sched")
        start_relax = os.path.join(my_save_dir, "start-relaxed-sched")
        pickle.dump(startsched, open(start_save, "w"))
        startsched.outputRelaxedSchedule(start_relax)
        # Simplify; retry up to 3 times since simplification can fail.
        s = simpl.Simplifier(tr, error_trace, binary_file, sys.argv[4:])
        donesched = fwdrev.Schedule()
        attempts = 0
        while attempts < 3:
            try:
                donesched = s.simplify()
                assert donesched != fwdrev.Schedule()
                break
            except AssertionError:
                donesched = fwdrev.Schedule()
                errout.write("Retrying Simplify of Iteration " + str(i) + "\n")
                errout.write("\terror: " + startsched.error + "\n\n")
                attempts += 1
        end_list.append(copy.deepcopy(donesched))
        # The simplified schedule must reproduce the same error at the same
        # addresses, and never be longer than the original.
        assert donesched.error == startsched.error
        assert donesched.addrlist == startsched.addrlist
        assert donesched.getScheduleLength() <= startsched.getScheduleLength()
        assert len(start_list) == len(end_list)
        startstr = startsched.getSummaryInfo()
        donestr = donesched.getSummaryInfo()
        # Summary 1 of 3: console.
        print
        print "Error:", donesched.error
        print
        print "Start Schedule:"
        print startstr
        print
        print "Simplified Schedule:"
        print donestr
        print
        # Summary 2 of 3: per-run README (written by redirecting sys.stdout).
        tmpout = open(os.path.join(my_save_dir, "README"), "w")
        sys.stdout = tmpout
        print "Error:", donesched.error
        print
        print "Start Schedule:"
        print startstr
        print
        print "Simplified Schedule:"
        print donestr
        print
        sys.stdout.flush()
        sys.stdout = sys.__stdout__
        tmpout.close()
        # Summary 3 of 3: aggregated run-stat file, same redirection trick.
        fout.write("**RUN " + str(i) + "\n")
        sys.stdout = fout
        print "Error:", donesched.error
        print
        print "Start Schedule:"
        print startstr
        print
        print "Simplified Schedule:"
        print donestr
        print
        fout.write("\n")
        sys.stdout.flush()
        sys.stdout = sys.__stdout__
        i += 1
        # Persist the simplified schedule (pickled and relaxed forms).
        simpl_save = os.path.join(my_save_dir, "simpl-sched")
        simpl_relax = os.path.join(my_save_dir, "simpl-relaxed-sched")
        pickle.dump(donesched, open(simpl_save, "w"))
        donesched.outputRelaxedSchedule(simpl_relax)
    fout.close()
# Script entry point.
if __name__ == "__main__":
    main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from unittest import TestCase
from qpid.datatypes import *
from qpid.ops import DeliveryProperties, FragmentProperties, MessageProperties
class SerialTest(TestCase):
    """Tests for serial-number arithmetic (qpid.datatypes.serial).

    NOTE: Python 2 only — uses long literals (0x...L) and the `long` builtin.
    """

    def test(self):
        # Ordering must hold on both sides of the 32-bit wrap point.
        for s in (serial(0), serial(0x8FFFFFFFL), serial(0xFFFFFFFFL)):
            assert s + 1 > s
            assert s - 1 < s
            assert s < s + 1
            assert s > s - 1
        # Incrementing the maximum serial wraps around to zero...
        assert serial(0xFFFFFFFFL) + 1 == serial(0)
        # ...so 0xFFFFFFFF compares as *less than* 0 in serial arithmetic.
        assert min(serial(0xFFFFFFFFL), serial(0x0)) == serial(0xFFFFFFFFL)
        assert max(serial(0xFFFFFFFFL), serial(0x0)) == serial(0x0)

    def testIncr(self):
        # In-place increment behaves like addition.
        s = serial(0)
        s += 1
        assert s == serial(1)

    def testIn(self):
        # Membership tests compare by serial equality, including wrapped
        # values and plain ints.
        l = [serial(1), serial(2), serial(3), serial(4)]
        assert serial(1) in l
        assert serial(0xFFFFFFFFL + 2) in l
        assert 4 in l

    def testNone(self):
        # A serial never equals None.
        assert serial(0) != None

    def testHash(self):
        # serial(0) and int 0 must hash/compare alike for dict lookups.
        d = {}
        d[serial(0)] = "zero"
        assert d[0] == "zero"

    def testAdd(self):
        # Addition works against both serials and plain ints.
        assert serial(2) + 2 == serial(4)
        assert serial(2) + 2 == 4

    def testSub(self):
        # serial - serial yields a plain integer distance...
        delta = serial(4) - serial(2)
        assert isinstance(delta, int) or isinstance(delta, long)
        assert delta == 2
        # ...while serial - int yields another Serial.
        delta = serial(4) - 2
        assert isinstance(delta, Serial)
        assert delta == serial(2)
class RangedSetTest(TestCase):
    """Tests for RangedSet, a set of integers stored as coalesced ranges."""

    def check(self, ranges):
        """Assert the invariant: boundary posts are sorted and no two
        adjacent ranges touch (they would have been merged)."""
        posts = []
        for rng in ranges:
            posts.append(rng.lower)
            posts.append(rng.upper)
        # Locals renamed from `sorted`/`range` to stop shadowing builtins.
        ordered = posts[:]
        ordered.sort()
        assert posts == ordered
        # An upper bound one below the next lower bound means two ranges
        # should have been coalesced into one.
        idx = 1
        while idx + 1 < len(posts):
            assert posts[idx] + 1 != posts[idx+1]
            idx += 2

    def test(self):
        """Single additions coalesce into contiguous ranges."""
        rs = RangedSet()
        self.check(rs.ranges)
        rs.add(1)
        assert 1 in rs
        assert 2 not in rs
        assert 0 not in rs
        self.check(rs.ranges)
        rs.add(2)
        assert 0 not in rs
        assert 1 in rs
        assert 2 in rs
        assert 3 not in rs
        self.check(rs.ranges)
        rs.add(0)
        assert -1 not in rs
        assert 0 in rs
        assert 1 in rs
        assert 2 in rs
        assert 3 not in rs
        self.check(rs.ranges)
        # A distant value starts a separate range.
        rs.add(37)
        assert -1 not in rs
        assert 0 in rs
        assert 1 in rs
        assert 2 in rs
        assert 3 not in rs
        assert 36 not in rs
        assert 37 in rs
        assert 38 not in rs
        self.check(rs.ranges)
        rs.add(-1)
        self.check(rs.ranges)
        rs.add(-3)
        self.check(rs.ranges)
        # Adding a whole span at once.
        rs.add(1, 20)
        assert 21 not in rs
        assert 20 in rs
        self.check(rs.ranges)

    def testAddSelf(self):
        """Adding an identical span twice must not duplicate ranges."""
        a = RangedSet()
        a.add(0, 8)
        self.check(a.ranges)
        a.add(0, 8)
        self.check(a.ranges)
        assert len(a.ranges) == 1
        only = a.ranges[0]
        assert only.lower == 0
        assert only.upper == 8

    def testEmpty(self):
        """An inverted span (upper < lower) adds nothing."""
        s = RangedSet()
        assert s.empty()
        s.add(0, -1)
        assert s.empty()
        s.add(0, 0)
        assert not s.empty()

    def testMinMax(self):
        """min()/max() track the extreme bounds; both are None when empty."""
        s = RangedSet()
        assert s.max() is None
        assert s.min() is None
        s.add(0, 10)
        assert s.max() == 10
        assert s.min() == 0
        s.add(0, 5)
        assert s.max() == 10
        assert s.min() == 0
        s.add(0, 11)
        assert s.max() == 11
        assert s.min() == 0
        s.add(15, 20)
        assert s.max() == 20
        assert s.min() == 0
        s.add(-10, -5)
        assert s.max() == 20
        assert s.min() == -10
class RangeTest(TestCase):
    """Tests for Range.intersect, which must be symmetric in its operands."""

    def testIntersect1(self):
        # Overlapping ranges intersect on [9, 10] from either side.
        first = Range(0, 10)
        second = Range(9, 20)
        for overlap in (first.intersect(second), second.intersect(first)):
            assert overlap.lower == 9
            assert overlap.upper == 10

    def testIntersect2(self):
        # Disjoint ranges yield no intersection from either side.
        first = Range(0, 10)
        second = Range(11, 20)
        assert first.intersect(second) == None
        assert second.intersect(first) == None

    def testIntersect3(self):
        # A contained range is its own intersection with the container.
        outer = Range(0, 10)
        inner = Range(3, 5)
        for overlap in (outer.intersect(inner), inner.intersect(outer)):
            assert overlap.lower == 3
            assert overlap.upper == 5
class UUIDTest(TestCase):
    def test(self):
        """Sanity-check uuid4 uniqueness over many consecutive draws."""
        # This test is kind of lame, but it does exercise the basic
        # functionality of the class: 1024 fresh values must all differ
        # from the reference value drawn first.
        u = uuid4()
        for i in xrange(1024):
            assert u != uuid4()
class MessageTest(TestCase):
    """Exercises the has/get/set/clear accessors of Message for its
    struct properties."""

    def setUp(self):
        self.mp = MessageProperties()
        self.dp = DeliveryProperties()
        self.fp = FragmentProperties()

    def testHas(self):
        msg = Message(self.mp, self.dp, self.fp, "body")
        for prop in ("message_properties", "delivery_properties",
                     "fragment_properties"):
            assert msg.has(prop)

    def testGet(self):
        msg = Message(self.mp, self.dp, self.fp, "body")
        expected = {"message_properties": self.mp,
                    "delivery_properties": self.dp,
                    "fragment_properties": self.fp}
        for prop, value in expected.items():
            assert msg.get(prop) == value

    def testSet(self):
        # construct without fragment properties, then set them
        msg = Message(self.mp, self.dp, "body")
        assert msg.get("fragment_properties") is None
        msg.set(self.fp)
        assert msg.get("fragment_properties") == self.fp

    def testSetOnEmpty(self):
        # construct with no properties at all, then set delivery props
        msg = Message("body")
        assert msg.get("delivery_properties") is None
        msg.set(self.dp)
        assert msg.get("delivery_properties") == self.dp

    def testSetReplace(self):
        # set() with a property of an existing type replaces the old one
        msg = Message(self.mp, self.dp, self.fp, "body")
        replacement = DeliveryProperties()
        assert msg.get("delivery_properties") == self.dp
        assert msg.get("delivery_properties") != replacement
        msg.set(replacement)
        assert msg.get("delivery_properties") != self.dp
        assert msg.get("delivery_properties") == replacement

    def testClear(self):
        msg = Message(self.mp, self.dp, self.fp, "body")
        expected = {"message_properties": self.mp,
                    "delivery_properties": self.dp,
                    "fragment_properties": self.fp}
        for prop, value in expected.items():
            assert msg.get(prop) == value
        # clearing one property leaves the others untouched
        msg.clear("fragment_properties")
        assert msg.get("fragment_properties") is None
        assert msg.get("message_properties") == self.mp
        assert msg.get("delivery_properties") == self.dp
class TimestampTest(TestCase):
    """Arithmetic on timestamp must keep producing timestamp instances
    with the expected numeric value."""

    def check(self, expected, *values):
        # every result must be a timestamp equal to `expected`, both as a
        # plain number and as a timestamp
        for value in values:
            assert isinstance(value, timestamp)
            assert value == expected
            assert value == timestamp(expected)

    def testAdd(self):
        self.check(4.0, timestamp(2.0) + 2.0, 2.0 + timestamp(2.0))

    def testSub(self):
        self.check(2.0, timestamp(4.0) - 2.0, 4.0 - timestamp(2.0))

    def testNeg(self):
        self.check(-4.0, -timestamp(4.0))

    def testPos(self):
        self.check(+4.0, +timestamp(4.0))

    def testAbs(self):
        self.check(4.0, abs(timestamp(-4.0)))

    def testConversion(self):
        # round-trip through datetime preserves the epoch value
        as_datetime = timestamp(0).datetime()
        roundtripped = timestamp(as_datetime)
        assert roundtripped == 0
| |
import string
import sys
import warnings
import numpy as np
import pandas as pd
from .pandas_vb_common import tm
try:
from pandas.api.types import union_categoricals
except ImportError:
try:
from pandas.types.concat import union_categoricals
except ImportError:
pass
class Constructor:
    """asv benchmarks for pd.Categorical construction from various inputs."""

    def setup(self):
        N = 10 ** 5
        self.categories = list("abcde")
        self.cat_idx = pd.Index(self.categories)
        self.values = np.tile(self.categories, N)
        self.codes = np.tile(range(len(self.categories)), N)
        # N // 10: `periods` must be an int (float periods is
        # deprecated/rejected by pd.date_range).
        self.datetimes = pd.Series(
            pd.date_range("1995-01-01 00:00:00", periods=N // 10, freq="s")
        )
        self.datetimes_with_nat = self.datetimes.copy()
        self.datetimes_with_nat.iloc[-1] = pd.NaT
        self.values_some_nan = list(np.tile(self.categories + [np.nan], N))
        self.values_all_nan = [np.nan] * len(self.values)
        self.values_all_int8 = np.ones(N, "int8")
        self.categorical = pd.Categorical(self.values, self.categories)
        self.series = pd.Series(self.categorical)
        self.intervals = pd.interval_range(0, 1, periods=N // 10)

    def time_regular(self):
        pd.Categorical(self.values, self.categories)

    def time_fastpath(self):
        pd.Categorical(self.codes, self.cat_idx, fastpath=True)

    def time_datetimes(self):
        pd.Categorical(self.datetimes)

    def time_interval(self):
        # BUG FIX: this benchmark previously duplicated time_datetimes and
        # never touched self.intervals; it now measures interval-dtype
        # Categorical construction as the setup intends.
        pd.Categorical(self.intervals, categories=self.intervals)

    def time_datetimes_with_nat(self):
        pd.Categorical(self.datetimes_with_nat)

    def time_with_nan(self):
        pd.Categorical(self.values_some_nan)

    def time_all_nan(self):
        pd.Categorical(self.values_all_nan)

    def time_from_codes_all_int8(self):
        pd.Categorical.from_codes(self.values_all_int8, self.categories)

    def time_existing_categorical(self):
        pd.Categorical(self.categorical)

    def time_existing_series(self):
        pd.Categorical(self.series)
class AsType:
    """asv benchmarks for casting categorical columns back to concrete
    dtypes.  The timed bodies are the measured artifact and are left as-is."""

    def setup(self):
        N = 10 ** 5
        random_pick = np.random.default_rng().choice
        # small per-dtype category pools; each column samples N values
        categories = {
            "str": list(string.ascii_letters),
            "int": np.random.randint(2 ** 16, size=154),
            "float": sys.maxsize * np.random.random((38,)),
            "timestamp": [
                pd.Timestamp(x, unit="s") for x in np.random.randint(2 ** 18, size=578)
            ],
        }
        self.df = pd.DataFrame(
            {col: random_pick(cats, N) for col, cats in categories.items()}
        )
        # string renderings of the numeric/timestamp columns, for the
        # str -> numeric cast benchmarks below
        for col in ("int", "float", "timestamp"):
            self.df[col + "_as_str"] = self.df[col].astype(str)
        for col in self.df.columns:
            self.df[col] = self.df[col].astype("category")

    def astype_str(self):
        [self.df[col].astype("str") for col in "int float timestamp".split()]

    def astype_int(self):
        [self.df[col].astype("int") for col in "int_as_str timestamp".split()]

    def astype_float(self):
        [
            self.df[col].astype("float")
            for col in "float_as_str int int_as_str timestamp".split()
        ]

    def astype_datetime(self):
        self.df["float"].astype(pd.DatetimeTZDtype(tz="US/Pacific"))
class Concat:
    """asv benchmarks for concatenating and union-ing categorical data."""

    def setup(self):
        N = 10 ** 5
        self.s = pd.Series(list("aabbcd") * N).astype("category")
        self.a = pd.Categorical(list("aabbcd") * N)
        self.b = pd.Categorical(list("bbcdjk") * N)
        self.idx_a = pd.CategoricalIndex(range(N), range(N))
        self.idx_b = pd.CategoricalIndex(range(N + 1), range(N + 1))
        self.df_a = pd.DataFrame(range(N), columns=["a"], index=self.idx_a)
        self.df_b = pd.DataFrame(range(N + 1), columns=["a"], index=self.idx_b)

    def time_concat(self):
        pd.concat([self.s, self.s])

    def time_union(self):
        # union_categoricals is bound by the version-dependent import at
        # the top of this module.
        union_categoricals([self.a, self.b])

    def time_append_overlapping_index(self):
        self.idx_a.append(self.idx_a)

    def time_append_non_overlapping_index(self):
        self.idx_a.append(self.idx_b)

    def time_concat_overlapping_index(self):
        pd.concat([self.df_a, self.df_a])

    def time_concat_non_overlapping_index(self):
        pd.concat([self.df_a, self.df_b])
class ValueCounts:
    """asv benchmark for Series.value_counts on categorical data."""

    # asv expands `params` into one benchmark variant per dropna value
    params = [True, False]
    param_names = ["dropna"]

    def setup(self, dropna):
        n = 5 * 10 ** 5
        arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
        self.ts = pd.Series(arr).astype("category")

    def time_value_counts(self, dropna):
        self.ts.value_counts(dropna=dropna)
class Repr:
    """asv benchmark for rendering a categorical Series as a string."""

    def setup(self):
        self.sel = pd.Series(["s1234"]).astype("category")

    def time_rendering(self):
        str(self.sel)
class SetCategories:
    """asv benchmark for Series.cat.set_categories."""

    def setup(self):
        n = 5 * 10 ** 5
        arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
        self.ts = pd.Series(arr).astype("category")

    def time_set_categories(self):
        # restrict to every other existing category
        self.ts.cat.set_categories(self.ts.cat.categories[::2])
class RemoveCategories:
    """asv benchmark for Series.cat.remove_categories."""

    def setup(self):
        n = 5 * 10 ** 5
        arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
        self.ts = pd.Series(arr).astype("category")

    def time_remove_categories(self):
        # drop every other existing category
        self.ts.cat.remove_categories(self.ts.cat.categories[::2])
class Rank:
    """asv benchmarks for Series.rank on str/int data vs their
    (ordered) categorical equivalents."""

    def setup(self):
        N = 10 ** 5
        ncats = 100
        # NOTE(review): tm.makeCategoricalIndex comes from the project's
        # pandas testing shim (removed in newer pandas) -- confirm pinning.
        self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)
        self.s_str_cat = pd.Series(self.s_str, dtype="category")
        # catch_warnings suppresses any warning emitted while building the
        # ordered dtype, keeping benchmark output clean
        with warnings.catch_warnings(record=True):
            str_cat_type = pd.CategoricalDtype(set(self.s_str), ordered=True)
            self.s_str_cat_ordered = self.s_str.astype(str_cat_type)
        self.s_int = pd.Series(np.random.randint(0, ncats, size=N))
        self.s_int_cat = pd.Series(self.s_int, dtype="category")
        with warnings.catch_warnings(record=True):
            int_cat_type = pd.CategoricalDtype(set(self.s_int), ordered=True)
            self.s_int_cat_ordered = self.s_int.astype(int_cat_type)

    def time_rank_string(self):
        self.s_str.rank()

    def time_rank_string_cat(self):
        self.s_str_cat.rank()

    def time_rank_string_cat_ordered(self):
        self.s_str_cat_ordered.rank()

    def time_rank_int(self):
        self.s_int.rank()

    def time_rank_int_cat(self):
        self.s_int_cat.rank()

    def time_rank_int_cat_ordered(self):
        self.s_int_cat_ordered.rank()
class IsMonotonic:
    """asv benchmarks for monotonicity checks on categorical index/series."""

    def setup(self):
        N = 1000
        # "aaa...bbb...ccc": values are sorted by construction
        self.c = pd.CategoricalIndex(list("a" * N + "b" * N + "c" * N))
        self.s = pd.Series(self.c)

    def time_categorical_index_is_monotonic_increasing(self):
        self.c.is_monotonic_increasing

    def time_categorical_index_is_monotonic_decreasing(self):
        self.c.is_monotonic_decreasing

    def time_categorical_series_is_monotonic_increasing(self):
        self.s.is_monotonic_increasing

    def time_categorical_series_is_monotonic_decreasing(self):
        self.s.is_monotonic_decreasing
class Contains:
    """asv benchmark for `in` on a CategoricalIndex vs the raw Categorical."""

    def setup(self):
        N = 10 ** 5
        # NOTE(review): tm.makeCategoricalIndex is private testing API.
        self.ci = tm.makeCategoricalIndex(N)
        self.c = self.ci.values
        self.key = self.ci.categories[0]

    def time_categorical_index_contains(self):
        self.key in self.ci

    def time_categorical_contains(self):
        self.key in self.c
class CategoricalSlicing:
    """asv benchmarks for indexing into Categorical data whose codes are
    monotonically increasing, decreasing, or non-monotonic."""

    params = ["monotonic_incr", "monotonic_decr", "non_monotonic"]
    param_names = ["index"]

    def setup(self, index):
        N = 10 ** 6
        categories = ["a", "b", "c"]
        # 3N codes: N zeros, then N ones, then N twos
        values = [0] * N + [1] * N + [2] * N
        if index == "monotonic_incr":
            self.data = pd.Categorical.from_codes(values, categories=categories)
        elif index == "monotonic_decr":
            self.data = pd.Categorical.from_codes(
                list(reversed(values)), categories=categories
            )
        elif index == "non_monotonic":
            self.data = pd.Categorical.from_codes([0, 1, 2] * N, categories=categories)
        else:
            raise ValueError(f"Invalid index param: {index}")
        self.scalar = 10000
        self.list = list(range(10000))
        self.cat_scalar = "b"

    def time_getitem_scalar(self, index):
        self.data[self.scalar]

    def time_getitem_slice(self, index):
        self.data[: self.scalar]

    def time_getitem_list_like(self, index):
        self.data[[self.scalar]]

    def time_getitem_list(self, index):
        self.data[self.list]

    def time_getitem_bool_array(self, index):
        self.data[self.data == self.cat_scalar]
class Indexing:
    """asv benchmarks for lookup/alignment operations on a CategoricalIndex."""

    def setup(self):
        N = 10 ** 5
        self.index = pd.CategoricalIndex(range(N), range(N))
        self.series = pd.Series(range(N), index=self.index).sort_index()
        self.category = self.index[500]

    def time_get_loc(self):
        self.index.get_loc(self.category)

    def time_shallow_copy(self):
        # NOTE(review): _view() is private pandas API; may break on upgrade.
        self.index._view()

    def time_align(self):
        # aligning a full and a truncated series into one frame
        pd.DataFrame({"a": self.series, "b": self.series[:500]})

    def time_intersection(self):
        self.index[:750].intersection(self.index[250:])

    def time_unique(self):
        self.index.unique()

    def time_reindex(self):
        self.index.reindex(self.index[:500])

    def time_reindex_missing(self):
        # all four labels are absent from the integer-valued index
        self.index.reindex(["a", "b", "c", "d"])

    def time_sort_values(self):
        self.index.sort_values(ascending=False)
class SearchSorted:
    """asv benchmark for searchsorted on a sorted CategoricalIndex and on
    its underlying Categorical values."""

    # NOTE(review): the method names below look copied from Contains but
    # actually time searchsorted; renaming would break asv's benchmark
    # history, so they are flagged rather than changed.

    def setup(self):
        N = 10 ** 5
        self.ci = tm.makeCategoricalIndex(N).sort_values()
        self.c = self.ci.values
        self.key = self.ci.categories[1]

    def time_categorical_index_contains(self):
        self.ci.searchsorted(self.key)

    def time_categorical_contains(self):
        self.c.searchsorted(self.key)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| |
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import os
import time
import logging
import tempfile
import yaml
import paramiko
import configparser
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.executor.playbook_executor import PlaybookExecutor
from sonsmbase.smbase import sonSMbase
# Module-wide logger; DEBUG so all FSM life-cycle traffic is visible.
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
class faceFSM(sonSMbase):
    # Credentials for the SSH fallback path (used when option != 0).
    # NOTE(review): hard-coded credentials in source -- presumably fine for
    # the SONATA demo environment, but confirm before any production use.
    username = 'sonata'
    #keyfile = '../ansible/roles/squid/files/sonata.pem'
    password = 'sonata'
    # Path of the monitoring probe config generated by createConf().
    monitoring_file = './node.conf'
    # Replacement squid configuration pushed during the "configure" event.
    alternate_squid_cfg_file = './ansible/roles/squid/files/squid.conf'
    # When True, the start event also provisions the monitoring probe.
    with_monitoring = True
    # 0 -> configure via Ansible playbook; anything else -> direct SSH.
    option = 1
def __init__(self):
LOG.debug('Initialization of faceFSM in %s', __file__)
"""
:param specific_manager_type: specifies the type of specific manager
that could be either fsm or ssm.
:param service_name: the name of the service that this specific manager
belongs to.
:param function_name: the name of the function that this specific
manager belongs to, will be null in SSM case
:param specific_manager_name: the actual name of specific manager
(e.g., scaling, placement)
:param id_number: the specific manager id number which is used to
distinguish between multiple SSM/FSM that are created for the same
objective (e.g., scaling with algorithm 1 and 2)
:param version: version
:param description: description
"""
#if 'KEYFILE' in os.environ:
# keyfile = os.environ['KEYFILE']
self.specific_manager_type = 'fsm'
#self.service_name = 'psa'
#self.function_name = 'proxy'
self.specific_manager_name = 'prx-config'
self.service_name = 'psaservice'
self.function_name = 'prx-vnf'
self.id_number = '1'
self.version = 'v0.1'
self.description = 'FSM that implements the subscription of the start, stop, configuration topics'
self.topic = ''
super(self.__class__, self).__init__(specific_manager_type = self.specific_manager_type,
service_name = self.service_name,
function_name = self.function_name,
specific_manager_name = self.specific_manager_name,
id_number = self.id_number,
version = self.version,
description = self.description)
    def on_registration_ok(self):
        """Announce readiness on the SSM status topic, then subscribe to the
        generic FSM topic for this service instance (self.sfuuid)."""
        LOG.debug("Received registration ok event for %s", __file__)
        state = "Subscription successful, I'm waiting for messages"
        message = {'name': self.specific_manager_id,
                   'status': state}
        self.manoconn.publish(topic = 'specific.manager.registry.ssm.status',
                              message = yaml.dump(message))
        # all life-cycle requests for this service arrive on this topic
        self.topic = "generic.fsm." + str(self.sfuuid)
        self.manoconn.subscribe(self.message_received, self.topic)
        LOG.info("Subscribed to " + self.topic + " topic.")
def message_received(self, ch, method, props, payload):
LOG.debug("Received message in %s", __file__)
"""
handling of the different possible messages
"""
request = yaml.load(payload)
if "fsm_type" not in request.keys():
LOG.info("Received a non-request message, ignoring...")
return
response = None
# if self.private_key == None:
# LOG.info("private key with value null")
# return
if str(request["fsm_type"]) == "start":
LOG.info("Start event received: " + str(request["content"]))
response = self.start_ev(request["content"])
elif str(request["fsm_type"]) == "stop":
LOG.info("Stop event received: " + str(request["content"]))
response = self.stop_ev(request["content"])
elif str(request["fsm_type"]) == "configure":
LOG.info("Config event received: " + str(request["content"]))
response = self.configure_ev(request["content"])
elif str(request["fsm_type"]) == "scale":
LOG.info("Scale event received: " + str(request["content"]))
response = self.scale_ev(request["content"])
if response is not None:
# Generated response for the FLM
LOG.info("Response to request generated:" + str(response))
#topic = "generic.fsm." + str(self.sfuuid)
corr_id = props.correlation_id
self.manoconn.notify(self.topic,
yaml.dump(response),
correlation_id = corr_id)
return
LOG.info("Request received for other type of FSM, ignoring...")
def start_ev(self, content):
LOG.info("Performing life cycle start event with content = %s", str(content.keys()))
vnfr = content["vnfr"]
vnfd = content["vnfd"]
LOG.info("VNFR: " + yaml.dump(vnfr))
vdu = vnfr['virtual_deployment_units'][0]
cpts = vdu['vnfc_instance'][0]['connection_points']
squid_ip = None
for cp in cpts:
if cp['type'] == 'management':
squid_ip = cp['interface']['address']
LOG.info("management ip: " + str(squid_ip))
if squid_ip is not None:
plbk = ''
if self.option == 0:
self.playbook_execution(plbk, squid_ip)
else:
opt = 0
self.ssh_execution(opt, squid_ip)
else:
LOG.info("No management connection point in vnfr")
response = {}
response['status'] = 'COMPLETED'
return response
def stop_ev(self, content):
LOG.info("Performing life cycle stop event with content = %s", str(content.keys()))
vnfr = content["vnfr"]
vnfd = content["vnfd"]
LOG.info("VNFR: " + yaml.dump(vnfr))
vdu = vnfr['virtual_deployment_units'][0]
cpts = vdu['vnfc_instance'][0]['connection_points']
squid_ip = None
for cp in cpts:
if cp['type'] == 'management':
squid_ip = cp['interface']['address']
LOG.info("management ip: " + str(squid_ip))
if squid_ip is not None:
plbk = ''
if self.option == 0:
self.playbook_execution(plbk, squid_ip)
else:
opt = 1
self.ssh_execution(opt, squid_ip)
else:
LOG.info("No management connection point in vnfr")
response = {}
response['status'] = 'COMPLETED'
return response
def configure_ev(self, content):
LOG.info("Configuration event with content = %s", str(content.keys()))
vnfr = content["vnfr"]
vnfd = content["vnfd"]
LOG.info("VNFR: " + yaml.dump(vnfr))
vdu = vnfr['virtual_deployment_units'][0]
cpts = vdu['vnfc_instance'][0]['connection_points']
squid_ip = None
for cp in cpts:
if cp['type'] == 'management':
squid_ip = cp['interface']['address']
LOG.info("management ip: " + str(squid_ip))
if squid_ip is not None:
plbk = '../ansible/site.yml'
if self.option == 0:
self.playbook_execution(plbk, squid_ip)
else:
opt = 2
self.ssh_execution(opt, squid_ip)
else:
LOG.info("No management connection point in vnfr")
response = {}
response['status'] = 'COMPLETED'
response['IP'] = squid_ip
return response
def scale_ev(self, content):
LOG.info("Scale event with content = %s", str(content.keys()))
vnfr = content["vnfr"]
vnfd = content["vnfd"]
LOG.info("VNFR: " + yaml.dump(vnfr))
vdu = vnfr['virtual_deployment_units'][0]
cpts = vdu['vnfc_instance'][0]['connection_points']
squid_ip = None
for cp in cpts:
if cp['type'] == 'management':
squid_ip = cp['interface']['address']
LOG.info("management ip: " + str(squid_ip))
if squid_ip is not None:
plbk = ''
if self.option == 0:
self.playbook_execution(plbk, squid_ip)
else:
opt = 3
self.ssh_execution(opt, squid_ip)
else:
LOG.info("No management connection point in vnfr")
response = {}
response['status'] = 'COMPLETED'
response['IP'] = squid_ip
return response
def playbook_execution(self, playbook, host_ip):
LOG.info("Executing playbook: %s", playbook)
loader = DataLoader()
inventory = None
with tempfile.NamedTemporaryFile() as fp:
fp.write(host_ip.encode('utf-8'))
fp.flush()
inventory = InventoryManager(loader=loader, sources=[fp.name])
variable_manager = VariableManager(loader = loadder, inventory = inventory)
if not os.path.exists(playbook):
LOG.error('The playbook %s does not exist', playbook)
return
Options = namedtuple('Options',
['listtags', 'listtasks', 'listhosts',
'syntax', 'connection', 'module_path',
'forks', 'remote_user', 'private_key_file',
'ssh_common_args', 'ssh_extra_args',
'sftp_extra_args', 'scp_extra_args',
'become', 'become_method', 'become_user',
'verbosity', 'check'])
options = Options(listtags = False, listtasks = False, listhosts = False,
syntax = False, connection = 'ssh', module_path = None,
forks = 100, remote_user = 'slotlocker',
private_key_file = None, ssh_common_args = None,
ssh_extra_args = None, sftp_extra_args = None,
scp_extra_args = None, become = True,
become_method = None, become_user = 'root',
verbosity = None, check = False)
variable_manager.extra_vars = {'hosts': host_ip}
passwords = {}
pbex = PlaybookExecutor(playbooks = [playbook],
inventory = inventory,
variable_manager = variable_manager,
loader = loader,
options = options,
passwords = passwords)
results = pbex.run()
return
    def ssh_execution(self, function, host_ip):
        """Apply a life-cycle action on the VNF over SSH.

        :param function: 0 = start squid (+ optional monitoring probe),
            1 = stop squid, 2 = replace squid.conf and restart,
            3 = start squid (scale); anything else is logged and ignored.
        :param host_ip: management address of the target VNF.
        """
        LOG.info("Executing ssh connection with function: %s", function)
        num_retries = 20
        ssh = paramiko.SSHClient()
        LOG.info("SSH client start for user %s", self.username)
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.load_system_host_keys()
        retry = 0
        # retry loop: keep attempting to connect until success or exhaustion
        while retry < num_retries:
            try:
                # ssh.connect(host_ip, username = self.username, pkey = self.private_key)
                ssh.connect(host_ip, username = self.username, password = self.password)
                break
            except paramiko.BadHostKeyException:
                # NOTE(review): self.server is never set on this class, so
                # this handler would itself raise AttributeError -- verify.
                LOG.info("%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname)
                retry += 1
            except EOFError:
                # NOTE(review): message says 5 seconds but sleeps 10 --
                # confirm the intended delay.
                LOG.info('Unexpected Error from SSH Connection, retry in 5 seconds')
                time.sleep(10)
                retry += 1
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt/
                # SystemExit; consider `except Exception:` here.
                LOG.info('SSH Connection refused from %s, will retry in 5 seconds', host_ip)
                time.sleep(10)
                retry += 1
        if retry == num_retries:
            LOG.info('Could not establish SSH connection within max retries')
            return;
        if function == 0:
            # start: bring squid up, then optionally install the monitoring
            # probe config and restart the probe service
            LOG.info("SSH connection established")
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo service squid start')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo mv /opt/monitoring /opt/Monitoring')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            retry = 0
            if self.with_monitoring == True:
                ftp = ssh.open_sftp()
                LOG.info("SFTP connection established")
                # write ./node.conf locally, then upload it to the VNF
                self.createConf(host_ip, 4, 'cache-vnf')
                localpath = self.monitoring_file
                LOG.info("SFTP connection entering on %s", localpath)
                remotepath = '/tmp/node.conf'
                sftpa = ftp.put(localpath, remotepath)
                ftp.close()
                LOG.info("SSH connection reestablished")
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo cp /tmp/node.conf /opt/Monitoring')
                LOG.info('output from remote: ' + str(ssh_stdout))
                LOG.info('output from remote: ' + str(ssh_stdin))
                LOG.info('output from remote: ' + str(ssh_stderr))
                ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo systemctl restart mon-probe.service')
                LOG.info('output from remote: ' + str(ssh_stdout))
                LOG.info('output from remote: ' + str(ssh_stdin))
                LOG.info('output from remote: ' + str(ssh_stderr))
            ssh.close()
        elif function == 1:
            # stop: shut squid down
            LOG.info("SSH client stop")
            LOG.info("SSH connection established")
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo service squid stop')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            ssh.close()
        elif function == 2:
            # configure: stop squid, upload the alternate squid.conf,
            # swap it in and restart
            LOG.info("SSH client configure")
            LOG.info("SSH connection established")
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo service squid stop')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            ftp = ssh.open_sftp()
            LOG.info("SFTP connection established")
            localpath = self.alternate_squid_cfg_file
            LOG.info("SFTP connection entering on %s", localpath)
            remotepath = '/tmp/squid.conf'
            sftpa = ftp.put(localpath, remotepath)
            ftp.close()
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo mv /etc/squid3/squid.conf /etc/squid3/squid.conf.old')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo cp /tmp/squid.conf /etc/squid3')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo service squid restart')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            ssh.close()
        elif function == 3:
            # scale: (re)start squid
            LOG.info("SSH client scale")
            LOG.info("SSH connection established")
            ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('sudo service squid start')
            LOG.info('output from remote: ' + str(ssh_stdout))
            LOG.info('output from remote: ' + str(ssh_stdin))
            LOG.info('output from remote: ' + str(ssh_stderr))
            ssh.close()
        else:
            LOG.info("Invalid operation on FSM %s", function)
        return
def createConf(self, pw_ip, interval, name):
#config = configparser.RawConfigParser()
config = configparser.ConfigParser(interpolation = None)
config.add_section('vm_node')
config.add_section('Prometheus')
config.set('vm_node', 'node_name', name)
config.set('vm_node', 'post_freq', str(interval))
config.set('Prometheus', 'server_url', 'http://' + pw_ip + ':9091/metrics')
with open('node.conf', 'w') as configfile: # save
config.write(configfile)
f = open('node.conf', 'r')
LOG.debug('Mon Config-> ' + "\n" + f.read())
f.close()
def main():
    """Instantiate the FSM (registration happens through the base class in
    __init__) and keep the process alive for AMQP callbacks."""
    faceFSM()
    while True:
        time.sleep(10)

if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import logging
from operator import itemgetter
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template import Template, Context
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.translation import string_concat
from helpfulfields.settings import MAX_NUM_RELATIONS
from helpfulfields.text import (seo_fieldset_label, changetracking_fieldset_label,
dates_fieldset_label, view_on_site_label,
object_not_mounted, logentry_label,
logentry_empty)
# module-level logger for the admin helpers below
logger = logging.getLogger(__name__)
#: a fieldset for use in a :class:`~django.contrib.admin.ModelAdmin`
#: :attr:`~django.contrib.admin.ModelAdmin.fieldsets` definition
#: to display objects which are making use of the
#: :attr:`~helpfulfields.models.Titles.title` and
#: :attr:`~helpfulfields.models.Titles.menu_title` provided by
#: :class:`~helpfulfields.models.Titles`.
#: This fieldset does not provide a name, as the field names should be self
#: descriptive at a very basic level.
titles_fieldset = [
    None, {
        'classes': [],
        'fields': [
            'title',
            'menu_title',
        ]
    }
]
#: a fieldset for use in a :class:`~django.contrib.admin.ModelAdmin`
#: :attr:`~django.contrib.admin.ModelAdmin.fieldsets` definition
#: to display objects which are making use of the
#: :attr:`~helpfulfields.models.Publishing.is_published` field provided by
#: :class:`~helpfulfields.models.Publishing`.
#: This fieldset does not provide a name, because it doesn't make much sense
#: for one field.
publishing_fieldset = [
    None, {
        'classes': [],
        'fields': [
            'is_published',
        ]
    }
]
#: a fieldset for use in a :class:`~django.contrib.admin.ModelAdmin`
#: :attr:`~django.contrib.admin.ModelAdmin.fieldsets` definition
#: to display objects which are making use of the
#: :attr:`~helpfulfields.models.DatePublishing.publish_on` and
#: :attr:`~helpfulfields.models.DatePublishing.unpublish_on` provided by
#: :class:`~helpfulfields.models.DatePublishing`.
#: The fieldset provides a translated name via
#: :attr:`~helpfulfields.text.dates_fieldset_label`
date_publishing_fieldset = [
    dates_fieldset_label, {
        'classes': [],
        'fields': [
            'publish_on',
            'unpublish_on',
        ]
    }
]
#: a fieldset for use in a :class:`~django.contrib.admin.ModelAdmin`
#: :attr:`~django.contrib.admin.ModelAdmin.fieldsets` definition
#: to display objects which are making use of the
#: :attr:`~helpfulfields.models.SEO.meta_title`,
#: :attr:`~helpfulfields.models.SEO.meta_description` and
#: :attr:`~helpfulfields.models.SEO.meta_keywords` provided by
#: :class:`~helpfulfields.models.SEO`.
#: The fieldset provides a translated name via
#: :attr:`~helpfulfields.text.seo_fieldset_label`, and collapses itself by
#: default.
seo_fieldset = [
    seo_fieldset_label, {
        'classes': [
            'collapse'
        ],
        'fields': [
            'meta_title',
            'meta_description',
            'meta_keywords',
        ]
    }
]
#: a list for use in a :class:`~django.contrib.admin.ModelAdmin`'s
#: :attr:`~django.contrib.admin.ModelAdmin.readonly_fields` configuration
#: to avoid allowing editing of the
#: :attr:`~helpfulfields.models.ChangeTracking.created` and
#: :attr:`~helpfulfields.models.ChangeTracking.modified` fields provided by
#: :class:`~helpfulfields.models.ChangeTracking`
changetracking_readonlys = ['created', 'modified']
#: a fieldset for use in a :class:`~django.contrib.admin.ModelAdmin`
#: :attr:`~django.contrib.admin.ModelAdmin.fieldsets` definition
#: to display objects which are making use of the
#: :attr:`~helpfulfields.models.ChangeTracking.created` and
#: :attr:`~helpfulfields.models.ChangeTracking.modified` fields provided by
#: :class:`~helpfulfields.models.ChangeTracking`.
#: The fieldset provides a translated name via
#: :attr:`~helpfulfields.text.changetracking_fieldset_label`, and starts out
#: collapsed as the data is unimportant.
changetracking_fieldset = [
    changetracking_fieldset_label, {
        'classes': [
            'collapse'
        ],
        'fields': [
            'created',
            'modified',
        ]
    }
]
class ViewOnSite(object):
    """
    Changelist column that renders a "view on site" link for each row.

    Usable in a :class:`~django.contrib.admin.ModelAdmin`
    :attr:`~django.contrib.admin.ModelAdmin.list_display`::

        class MyModelAdmin(ModelAdmin):
            list_display = ['pk', ViewOnSite('column name', 'view on site!')]

    The link targets the admin's content-type redirect view and relies on
    the object defining :meth:`~django.db.models.Model.get_absolute_url`.

    :test case: :class:`helpfulfields.tests.ViewOnSiteTestCase`
    """
    def __init__(self, text=view_on_site_label, label=view_on_site_label):
        """
        :param text: the visible link text, eg: "View on site"
        :param label: the column heading shown on the
            :meth:`~django.contrib.admin.ModelAdmin.changelist_view`.
        """
        self.short_description = label
        self.__name__ = label
        self.text = text
        self.allow_tags = True

    def __call__(self, obj):
        """
        Render the redirect link for *obj*.

        :param obj: the current object in the changelist loop.
        :return: the link markup, or u'' when the object cannot resolve a
            frontend URL (no ``get_absolute_url``).
        :rtype: unicode string.
        """
        if not hasattr(obj, 'get_absolute_url'):
            return u''
        template = (u'<a href="../../r/%(content_type)d/%(pk)d/" class="'
                    u'changelist-viewsitelink">%(text)s</a>')
        context = {
            u'content_type': ContentType.objects.get_for_model(obj).pk,
            u'pk': obj.pk,
            u'text': escape(force_unicode(self.text)),
        }
        return template % context
class RelationCount(object):
    """
    Changelist column showing a count of related objects.

    Usable in a :class:`~django.contrib.admin.ModelAdmin`
    :attr:`~django.contrib.admin.ModelAdmin.list_display`::

        class MyModelAdmin(ModelAdmin):
            list_display = ['pk', RelationCount('relation_name', 'item count')]

    which adds a column showing ``obj.accessor.count()`` and the relation's
    verbose name.

    .. note::
        We expect to be able to address the relation from the ``obj`` instance.
        As such, reverse relations denied via setting a ``related_name`` of ``+``
        won't work.

    .. warning::
        This should result in a maximum of **one** additional query being
        executed, *per object, per usage*, to get a count of related objects.

    :test case: :class:`helpfulfields.tests.RelationCountTestCase`
    """
    def __init__(self, accessor, label):
        """
        :param accessor: The attribute to look for on each ``obj`` (Model instance)
        :param label: the short description for the
            :meth:`~django.contrib.admin.ModelAdmin.changelist_view`
            changelist column.
        """
        self.accessor = accessor
        self.short_description = label
        self.__name__ = label

    def __call__(self, obj):
        """
        Render e.g. *3 categories* for the configured relation.

        .. note::
            Doesn't currently handle pluralisation properly.

        :param obj: the current object in the changelist loop.
        :return: a count and verbose name, eg: *3 categories*.
        :rtype: unicode string.
        """
        relation = getattr(obj, self.accessor)
        self._relcount = relation.count()
        # BUG FIX: the original assignment ended with a stray trailing comma,
        # making _vname a 1-tuple so the column rendered as
        # u"3 (u'category',)" instead of u"3 category".
        self._vname = obj._meta.get_field_by_name(self.accessor)[0].opts.verbose_name
        return u'%(count)d %(verbose_name)s' % {
            'count': self._relcount,
            'verbose_name': self._vname,
        }
class RelationList(object):
    """
    An object capable of being used in the
    :class:`~django.contrib.admin.ModelAdmin`
    :attr:`~django.contrib.admin.ModelAdmin.list_display` to show a linked list
    of related items::

        class MyModelAdmin(ModelAdmin):
            list_display = ['pk', RelationList('accessor', 'item count')]

    which adds a new column to the admin which shows the results of
    ``obj.accessor.all()`` as links to the appropriate modeladmin page.

    .. note::
        We expect to be able to address the relation from the ``obj`` instance.
        As such, reverse relations denied via setting a ``related_name`` of ``+``
        won't work.

    .. warning::
        It is worth highlighting that this should result in a maximum
        of **one** additional query being executed, *per object, per usage*, to
        get list of related objects. Changing the
        :class:`~django.contrib.admin.ModelAdmin` to use
        :meth:`~django.db.models.query.QuerySet.select_related` and/or
        :meth:`~django.db.models.query.QuerySet.prefetch_related` may remove
        this extra query.

    :test case: :class:`helpfulfields.tests.RelationListTestCase`
    """
    def __init__(self, accessor, label, max_num=MAX_NUM_RELATIONS,
                 more_separator=None, admin_site='admin'):
        """
        :param accessor: The attribute to look for on each ``obj``
                         (Model instance)
        :param label: the short description for the
                      :meth:`~django.contrib.admin.ModelAdmin.changelist_view`
                      changelist column.
        :param max_num: The maximum number of related item links to show.
        :param more_separator: the content between items, and the "N more" link.
        :param admin_site: the URL namespace of the admin.
        """
        self.accessor = accessor
        self.short_description = label
        self.__name__ = label
        self.max_num = max_num
        self.admin_url = admin_site
        self.more_content = more_separator or u'…'
        # Tell the admin changelist not to escape our rendered HTML.
        self.allow_tags = True
    def __call__(self, obj):
        """
        adds a new column to the admin which shows the results of
        ``obj.accessor.all()`` as links to the appropriate modeladmin page.

        :param obj: the current object in the changelist loop.
        :return: a comma separated list of links to the related objects.
        :rtype: unicode string.
        """
        related = getattr(obj, self.accessor)
        if callable(related):
            related = related()
        # TODO: it'd be really nice if this could handle methods on ``obj``
        field = obj._meta.get_field_by_name(self.accessor)[0]
        url_kwargs = {
            'admin': self.admin_url,
            'module': field.opts.app_label,
            'klass': field.opts.module_name,
        }
        changelist_pattern = '%(admin)s:%(module)s_%(klass)s_changelist'
        change_pattern = '%(admin)s:%(module)s_%(klass)s_change'
        try:
            changelist_url = reverse(changelist_pattern % url_kwargs)
        except NoReverseMatch:
            # The relation isn't mounted on this admin site; surface the
            # problem in debug mode, otherwise log quietly and render nothing.
            if settings.DEBUG:
                raise
            logger.debug(object_not_mounted % {
                'verbose_name': field.opts.object_name,
                'site': u'"%s"' % self.admin_url,
            })
            return u''
        # Evaluate the queryset once so we know every primary key involved,
        # even the ones we won't link individually -- the "more" changelist
        # link needs the complete pk list for its filter.
        try:
            all_related = list(related.all())
        except AttributeError:
            # Not a manager/queryset for a relation -- perhaps a foreign key
            # instance (possibly None). Treat whatever we got as a one-item
            # list and hope for the best.
            all_related = [related]
        self._count = len(all_related)
        more_url = u'%(url)s?id__in=%(filter_pks)s' % {
            'url': changelist_url,
            'filter_pks': ','.join([force_unicode(item.pk) for item in all_related]),
        }
        # Append a "... N more" link only when items were truncated.
        more_link = u''
        if self._count > self.max_num:
            more_link = (u'%(separator)s<a href="%(url)s" '
                         u'class="changelist-morerelatedlink">%(count)d'
                         u' more</a>' % {
                             'url': more_url,
                             'count': self._count - self.max_num,
                             'separator': self.more_content,
                         })
        # One admin edit link per individually-shown relation.
        link_template = (u'<a href="%(url)s" class="changelist-relatedlink"'
                         u'>%(link)s</a>')
        rendered = []
        for item in all_related[:self.max_num]:
            rendered.append(link_template % {
                'url': reverse(change_pattern % url_kwargs, args=(item.pk,)),
                'link': escape(item),
            })
        # more_link may be empty ...
        return string_concat(u', '.join(rendered), more_link)
class LogEntrySparkline(object):
    """
    An object capable of being used in the
    :class:`~django.contrib.admin.ModelAdmin`
    :attr:`~django.contrib.admin.ModelAdmin.list_display` to show a tiny
    HTML-only sparkline of recent changes made via the admin::

        class MyModelAdmin(ModelAdmin):
            list_display = ['pk', LogEntrySparkline(days=60)]

    .. warning::
        It is worth highlighting that this will potentially result in a maximum
        of **two** additional queries being executed, *per object*, to get
        the :class:`~django.contrib.contenttypes.models.ContentType` and the
        :class:`~django.contrib.admin.models.LogEntry` items.
        This will be amortized down to **one** query, once all needed
        :class:`~django.contrib.contenttypes.models.ContentType` objects have
        been cached internally by `Django`_.

    .. note::
        For the sake of being portable, and not requiring we be in the
        `INSTALLED_APPS`, the HTML and CSS are actually declared
        on this class, rather than via a template which we might
        :func:`~django.template.loader.render_to_string`. This may yet be a
        mistake, so the API methods should be considered private.

    An example of the output is provided below, though it may render slightly
    differently due to font-sizing differences between this documentation and
    the standard `Django`_ :class:`~django.contrib.admin.AdminSite`:

    .. raw:: html

        <div class="changelist-sparkline" style="overflow:hidden;border-bottom:1px dotted #5b80b2;height:1em; display:inline-block;">
        <div class="changelist-sparkline-bar" style="height:0.3em;width:0.3em;vertical-align:baseline;margin:0 0.05em;display:inline-block;background-color:#7CA0C7;"></div>
        <div class="changelist-sparkline-bar" style="height:0.5em;width:0.3em;vertical-align:baseline;margin:0 0.05em;display:inline-block;background-color:#7CA0C7;"></div>
        <div class="changelist-sparkline-bar" style="height:1em;width:0.3em;vertical-align:baseline;margin:0 0.05em;display:inline-block;background-color:#7CA0C7;"></div>
        <div class="changelist-sparkline-bar" style="height:0.1em;width:0.3em;vertical-align:baseline;margin:0 0.05em;display:inline-block;background-color:#7CA0C7;"></div>
        <div class="changelist-sparkline-bar" style="height:0.6em;width:0.3em;vertical-align:baseline;margin:0 0.05em;display:inline-block;background-color:#7CA0C7;"></div>
        </div>

    :test case: :class:`helpfulfields.tests.SparklineTestCase`
    """
    def __init__(self, days=14, label=logentry_label):
        """
        :param days: How far back should we generate a sparkline for.
        :param label: the short description for the
                      :meth:`~django.contrib.admin.ModelAdmin.changelist_view`
                      changelist column.
        """
        self.short_description = label
        self.__name__ = label
        self.days = days
        # Tell the admin changelist not to escape our rendered HTML.
        self.allow_tags = True
    def __call__(self, obj):
        """
        generates the necessary data for displaying a sparkline.

        :param obj: the current object in the changelist loop.
        :return: the HTML representing the sparkline graph.
        :rtype: unicode string.
        """
        ct = ContentType.objects.get_for_model(obj)
        now = datetime.now()
        back_to = now - timedelta(days=self.days)
        # get all entries for this object in the last N days.
        # Note: It doesn't matter what order results are returned in, as
        # we're popping them into a separate, unsorted data structure anyway.
        entries = LogEntry.objects.filter(content_type=ct, object_id=obj.pk,
                                          action_time__gte=back_to)
        # generate the initial (zeroed) bucket per calendar day.
        days_with_counts = {}
        for day_distance in range(0, self.days):
            new_datetime = now - timedelta(days=day_distance)
            days_with_counts[new_datetime.date()] = 0
        # populate the existing dates with change counts. Entries stamped on
        # the boundary day (exactly ``self.days`` ago but later in the day
        # than ``back_to``) have no bucket, so guard against a KeyError.
        for entry in entries:
            entry_date = entry.action_time.date()
            if entry_date in days_with_counts:
                days_with_counts[entry_date] += 1
        maximum = max(days_with_counts.values())  #: 1em / 100%
        if maximum < 1:
            return logentry_empty
        days_with_css_vals = {}
        for key, val in days_with_counts.items():
            # Force true division: under Python 2, ``val / maximum`` with two
            # ints floors to 0 for every non-maximal day, flattening the graph.
            days_with_css_vals[key] = float(val) / maximum
        results = sorted(days_with_css_vals.items(), key=itemgetter(0))
        ctx = Context({
            'sparks': results,
            'sparkbar_css': self._sparkline_bar_css(),
            'sparkline_css': self._sparkline_graph_css()
        })
        return self._sparkline_template().render(ctx)
    def _sparkline_bar_css(self):
        """
        generates the necessary CSS for an individual bar on the graph.

        :return: the CSS, as minified as we can get it.
        :rtype: unicode string.
        """
        css = {
            'width': '0.3em',
            'margin': '0 0.05em',
            'display': 'inline-block',
            'background-color': '#7CA0C7',
            'vertical-align': 'baseline',
        }
        return ''.join(['%s:%s;' % rule_val for rule_val in css.items()])
    def _sparkline_graph_css(self):
        """
        generates the necessary CSS for the sparkline graph itself.

        :return: the CSS, as minified as we can get it.
        :rtype: unicode string.
        """
        css = {
            'height': '1em',
            'border-bottom': '1px dotted #5b80b2',
            'overflow': 'hidden',
        }
        return ''.join(['%s:%s;' % rule_val for rule_val in css.items()])
    def _sparkline_template(self):
        """
        generates the HTML, implements each bar and the appropriate CSS.

        :return: the template, ready to be rendered.
        :rtype: :class:`~django.template.base.Template`
        """
        return Template('''{% spaceless %}
        <div class="changelist-sparkline" style="{{ sparkline_css }}">
        {% for date, spark in sparks %}
        <div class="changelist-sparkline-bar" style="height:{{ spark }}em;{{ sparkbar_css }}"></div>
        {% endfor %}
        </div>
        {% endspaceless %}''')
| |
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
import binascii
import logging
import traceback
import itertools
import netaddr
from bitstring import BitArray
from yabgp.message.notification import Notification
from yabgp.message.update import Update
from yabgp.message.route_refresh import RouteRefresh
from yabgp.message.open import Open
from yabgp.common import constants as bgp_cons
from yabmp.common import constants as bmp_cons
from yabmp.common import exception as excp
LOG = logging.getLogger()
class BMPMessage(object):
    """
    BMP message class.

    Definition of a BMP message and the methods used to decode one.  The
    caller is expected to fill in ``version``, ``msg_type`` and ``raw_body``
    from the common BMP header, then call :meth:`consume` to decode.
    """
    def __init__(self):
        self.version = None   # BMP protocol version from the common header
        self.msg_type = None  # message type code from the common header
        self.raw_body = None  # raw bytes following the common header
        self.msg_body = None  # bytes following the per-peer header, if any
    @staticmethod
    def rd2str(rd):
        """
        Convert 8 bytes of route distinguisher into a string
        according to rfc4364 section 4.2.
        The first two bytes define the encoding format:
            0   asn:nn    (2-byte ASN, 4-byte assigned number)
            1   ip:nn     (4-byte IPv4 address, 2-byte assigned number)
            2   asn4:nn   (4-byte ASN, 2-byte assigned number)

        :param rd: 8 raw bytes of the route distinguisher
        :return: string representation, e.g. ``'65000:100'``
        """
        rd_type = int.from_bytes(rd[0:2], "big")
        if rd_type == 0:
            return str(int.from_bytes(rd[2:4], "big")) + ":" + str(int.from_bytes(rd[4:8], "big"))
        elif rd_type == 1:
            ip_value = int(binascii.b2a_hex(rd[2:6]), 16)
            return str(netaddr.IPAddress(ip_value, version=4)) + ":" + str(int.from_bytes(rd[6:8], "big"))
        elif rd_type == 2:
            return str(int.from_bytes(rd[2:6], "big")) + ":" + str(int.from_bytes(rd[6:8], "big"))
        else:
            # Unknown RD type: fall back to the raw byte representation.
            return str(rd[0:8])
    @staticmethod
    def parse_per_peer_header(raw_peer_header):
        """
        decode per-peer header.
        every bmp message has this header, and the header length is 42 bytes.

        :param raw_peer_header: raw bytes of the header
        :return: dict with keys ``type``, ``flags``, ``dist``, ``addr``,
                 ``as``, ``bgpID`` and ``time``
        :raises excp.UnknownPeerTypeValue: for an unrecognised peer type
        :raises excp.UnknownPeerFlagValue: for an unrecognised flag bit pattern
        """
        # 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Peer Type | Peer Flags |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Peer Distinguisher (present based on peer type) |
        # | |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Peer Address (16 bytes) |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Peer AS |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Peer BGP ID |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Timestamp (seconds) |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Timestamp (microseconds) |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        per_header_dict = {
            'type': None,
            'flags': None,
            'dist': None,
            'addr': None,
            'as': None,
            'bgpID': None,
            'time': None
        }
        LOG.debug('decode per-peer header')
        per_header_dict['type'] = struct.unpack('!B', raw_peer_header[0:1])[0]
        # Peer Type = 0: Global Instance Peer
        # Peer Type = 1: RD Instance Peer
        # Peer Type = 2: Local Instance Peer
        if per_header_dict['type'] not in [0, 1, 2]:
            raise excp.UnknownPeerTypeValue(peer_type=per_header_dict['type'])
        LOG.debug('peer type: %s ' % per_header_dict['type'])
        # Peer Flags: only the top three bits (V, L, A) are defined; any set
        # bit outside that range means an unknown flag.
        peer_flags_value = binascii.b2a_hex(raw_peer_header[1:2])
        hex_rep = hex(int(peer_flags_value, 16))
        bit_array = BitArray(hex_rep)
        valid_flags = [''.join(item)+'00000' for item in itertools.product('01', repeat=3)]
        valid_flags.append('0000')
        if bit_array.bin in valid_flags:
            flags = dict(zip(bmp_cons.PEER_FLAGS, bit_array.bin))
            per_header_dict['flags'] = flags
            LOG.debug('Per Peer header flags %s' % flags)
        else:
            raise excp.UnknownPeerFlagValue(peer_flags=peer_flags_value)
        LOG.debug('peer flag: %s ' % per_header_dict['flags'])
        # The peer distinguisher is only meaningful for RD/Local instance peers.
        if per_header_dict['type'] in [1, 2]:
            per_header_dict['dist'] = BMPMessage.rd2str(raw_peer_header[2:10])
            LOG.debug('peer dist: %s' % per_header_dict['dist'])
        # The V flag selects IPv6 vs IPv4 interpretation of the 16-byte address.
        ip_value = int(binascii.b2a_hex(raw_peer_header[10:26]), 16)
        if int(per_header_dict['flags']['V']):
            per_header_dict['addr'] = str(netaddr.IPAddress(ip_value, version=6))
        else:
            per_header_dict['addr'] = str(netaddr.IPAddress(ip_value, version=4))
        per_header_dict['as'] = int(binascii.b2a_hex(raw_peer_header[26:30]), 16)
        LOG.debug('peer as: %s' % per_header_dict['as'])
        per_header_dict['bgpID'] = str(netaddr.IPAddress(int(binascii.b2a_hex(raw_peer_header[30:34]), 16)))
        LOG.debug('peer bgp id: %s' % per_header_dict['bgpID'])
        per_header_dict['time'] = (int(binascii.b2a_hex(raw_peer_header[34:38]), 16),
                                   int(binascii.b2a_hex(raw_peer_header[38:42]), 16))
        LOG.debug('timestamp: %s.%s' % (per_header_dict['time'][0], per_header_dict['time'][1]))
        return per_header_dict
    @staticmethod
    def parse_route_monitoring_msg(msg):
        """
        Route Monitoring messages are used for initial synchronization of
        ADJ-RIBs-In. They are also used for ongoing monitoring of received
        advertisements and withdraws.
        Following the common BMP header and per-peer header is a BGP Update
        PDU.

        :param msg: raw bytes of the encapsulated BGP PDU
        :return: ``(bgp_msg_type, decoded_dict)`` for UPDATE (2) and
                 ROUTE-REFRESH (5) PDUs, or ``None`` for decode errors and
                 any other PDU type
        """
        LOG.debug('decode route monitoring message')
        # The BGP message type lives in the last byte of the 19-byte BGP header.
        bgp_msg_type = struct.unpack('!B', msg[18:19])[0]
        LOG.debug('bgp message type=%s' % bgp_msg_type)
        msg = msg[bgp_cons.HDR_LEN:]
        if bgp_msg_type == 2:
            # decode update message
            results = Update().parse(None, msg, asn4=True)
            if results['sub_error']:
                LOG.error('error: decode update message error!, error code: %s' % results['sub_error'])
                LOG.error('Raw data: %s' % repr(results['hex']))
                return None
            return_result = {
                'attr': results['attr'],
                'nlri': results['nlri'],
                'withdraw': results['withdraw']}
            LOG.debug('bgp update message: %s' % return_result)
            return bgp_msg_type, return_result
        elif bgp_msg_type == 5:
            bgp_route_refresh_msg = RouteRefresh().parse(msg=msg)
            LOG.debug('bgp route refresh message: afi=%s,res=%s,safi=%s' % (bgp_route_refresh_msg[0],
                                                                            bgp_route_refresh_msg[1],
                                                                            bgp_route_refresh_msg[2]))
            return bgp_msg_type, {'afi': bgp_route_refresh_msg[0],
                                  'sub_type': bgp_route_refresh_msg[1],
                                  'safi': bgp_route_refresh_msg[2]}
        # Other BGP message types are not expected inside Route Monitoring.
        return None
    @staticmethod
    def parse_route_mirroring_msg(msg):
        """
        Route Mirroring messages are used for verbatim duplication of
        messages as received. Following the common BMP header and per-peer
        header is a set of TLVs that contain information about a message
        or set of messages.

        :param msg: raw bytes of the TLV section
        :return: dict -- key ``'0'`` holds the decoded BGP messages grouped
                 by kind, key ``'1'`` the information code (if present), and
                 any unknown TLV is stored under its integer type
        """
        LOG.debug('decode route mirroring message')
        msg_dict = {}
        open_l = []
        update = []
        notification = []
        route_refresh = []
        while msg:
            mirror_type, length = struct.unpack('!HH', msg[0:4])
            mirror_value = msg[4: 4 + length]
            msg = msg[4 + length:]
            if mirror_type == 0:
                # BGP Message TLV: the value is a complete BGP PDU.
                bgp_msg_type = struct.unpack('!B', mirror_value[18:19])[0]
                LOG.debug('bgp message type=%s' % bgp_msg_type)
                bgp_msg_body = mirror_value[bgp_cons.HDR_LEN:]
                if bgp_msg_type == 2:
                    # Update message
                    bgp_update_msg = Update().parse(None, bgp_msg_body, asn4=True)
                    if bgp_update_msg['sub_error']:
                        LOG.error('error: decode update message error!, error code: %s' % bgp_update_msg['sub_error'])
                        LOG.error('Raw data: %s' % repr(bgp_update_msg['hex']))
                    else:
                        update.append(bgp_update_msg)
                elif bgp_msg_type == 5:
                    # Route Refresh message
                    bgp_route_refresh_msg = RouteRefresh().parse(msg=bgp_msg_body)
                    LOG.debug('bgp route refresh message: afi=%s,res=%s,safi=%s' % (bgp_route_refresh_msg[0],
                                                                                    bgp_route_refresh_msg[1],
                                                                                    bgp_route_refresh_msg[2]))
                    route_refresh.append(bgp_route_refresh_msg)
                elif bgp_msg_type == 1:
                    # Open message
                    open_msg = Open().parse(bgp_msg_body)
                    open_l.append(open_msg)
                elif bgp_msg_type == 3:
                    # Notification message
                    notification_msg = Notification().parse(bgp_msg_body)
                    notification.append(notification_msg)
            elif mirror_type == 1:
                # Information type.
                # Amount of this TLV is not specified but we can assume
                # only one per mirroring message is present.
                info_code_type = struct.unpack('!H', mirror_value)[0]
                msg_dict['1'] = info_code_type
            else:
                msg_dict[mirror_type] = binascii.unhexlify(binascii.hexlify(mirror_value))
                LOG.info('unknow mirroring type, type = %s' % mirror_type)
        msg_dict['0'] = {
            'update': update,
            'route_refresh': route_refresh,
            'open': open_l,
            'notification': notification
        }
        return msg_dict
    @staticmethod
    def parse_statistic_report_msg(msg):
        """
        These messages contain information that could be used by the
        monitoring station to observe interesting events that occur on the
        router.

        :param msg: raw bytes of the statistics report body
        :return: dict mapping stat type code to its integer value
        """
        # 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Stats Count |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # Each counter is encoded as follows,
        # 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Stat Type | Stat Len |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Stat Data |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        LOG.info('decode statistic report message')
        count_num = int(binascii.b2a_hex(msg[0:4]), 16)
        count_dict = {}
        msg = msg[4:]
        while count_num:
            stat_type, stat_len = struct.unpack('!HH', msg[0:4])
            stat_data = msg[4:4+stat_len]
            msg = msg[4+stat_len:]
            stat_value = int(binascii.b2a_hex(stat_data), 16)
            count_dict[stat_type] = stat_value
            if stat_type not in bmp_cons.BMP_STAT_TYPE:
                LOG.warning('unknown statistic report type, type=%s' % stat_type)
            else:
                LOG.info('stat_type=%s, stat_value=%s' % (bmp_cons.BMP_STAT_TYPE[stat_type], stat_value))
            count_num -= 1
        return count_dict
    @staticmethod
    def parse_peer_down_notification(msg):
        """
        This message is used to indicate that a peering session was terminated.

        :param msg: raw bytes of the peer-down body
        :return: the integer reason code
        """
        # 0 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+
        # | Reason | 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Data (present if Reason = 1, 2 or 3) |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        LOG.info('decode peer down notification')
        reason = int(binascii.b2a_hex(msg[0:1]), 16)
        LOG.info('reason: %s' % reason)
        data = msg[1:]
        if reason == 1:
            LOG.info('Reason : 1 The local system closed the session. Following the '
                     'Reason is a BGP PDU containing a BGP NOTIFICATION message that'
                     'would have been sent to the peer')
            Notification().parse(message=data)
        elif reason == 2:
            LOG.info('Reason :2 The local system closed the session. No notification'
                     'message was sent. Following the reason code is a two-byte field'
                     'containing the code corresponding to the FSM Event which caused'
                     'the system to close the session (see Section 8.1 of [RFC4271]).'
                     'Two bytes both set to zero are used to indicate that no relevant'
                     'Event code is defined')
        elif reason == 3:
            LOG.info('Reason : 3 The remote system closed the session with a notification'
                     'message. Following the Reason is a BGP PDU containing the BGP'
                     'NOTIFICATION message as received from the peer.')
        elif reason == 4:
            LOG.info('Reason : 4 The remote system closed the session without a notification message')
        else:
            # BUGFIX: was ``LOG.waring`` (AttributeError on unknown reasons).
            LOG.warning('unknown peer down notification reason')
        return reason
    @staticmethod
    def parse_peer_up_notification(msg, peer_flag):
        """
        The Peer Up message is used to indicate that a peering session has
        come up (i.e., has transitioned into ESTABLISHED state). Following
        the common BMP header and per-peer header is the following:

        :param msg: raw bytes of the peer-up body
        :param peer_flag: see parse_per_peer_header
        :return: dict with local address/port, remote port and the sent and
                 received OPEN messages
        """
        # 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Local Address (16 bytes) |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Local Port | Remote Port |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Sent OPEN Message |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Received OPEN Message |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        LOG.info('decode peer up notification')
        ip_value = int(binascii.b2a_hex(msg[0:16]), 16)
        if int(peer_flag['V']):
            # ipv6 address
            ip_address = str(netaddr.IPAddress(ip_value, version=6))
        else:
            ip_address = str(netaddr.IPAddress(ip_value, version=4))
        LOG.info('local address: %s' % ip_address)
        local_port = int(binascii.b2a_hex(msg[16:18]), 16)
        LOG.info('local port: %s' % local_port)
        remote_port = int(binascii.b2a_hex(msg[18:20]), 16)
        LOG.info('remote port: %s' % remote_port)
        # decode sent and received open message; the BGP header's length
        # field (bytes 16-18) tells us where the sent OPEN ends.
        open_msg_data = msg[20:]
        length = struct.unpack('!H', open_msg_data[16:18])[0]
        sent_open_msg = Open().parse(open_msg_data[bgp_cons.HDR_LEN: length])
        open_msg_data = open_msg_data[length:]
        received_open_msg = Open().parse(open_msg_data[bgp_cons.HDR_LEN:])
        LOG.info('sent open: %s' % sent_open_msg)
        LOG.info('received open: %s' % received_open_msg)
        return {
            'local_address': ip_address,
            'local_port': local_port,
            'remote_port': remote_port,
            'sent_open_msg': sent_open_msg,
            'received_open_msg': received_open_msg
        }
    @staticmethod
    def parse_initiation_msg(msg):
        """
        The initiation message provides a means for the monitored router to
        inform the monitoring station of its vendor, software version, and so
        on. An initiation message MUST be sent as the first message after
        the TCP session comes up. An initiation message MAY be sent at any
        point thereafter, if warranted by a change on the monitored router.
        The initiation message consists of the common BMP header followed by
        two or more TLVs containing information about the monitored router,
        as follows:

        :param msg: raw bytes of the TLV section
        :return: dict mapping information-type name (or unknown code) to value
        """
        # 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Information Type | Information Length |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Information (variable) |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        LOG.info('decode initiation message')
        msg_dict = {}
        while msg:
            info_type, length = struct.unpack('!HH', msg[0:4])
            info_value = msg[4: 4 + length]
            msg = msg[4 + length:]
            if info_type in bmp_cons.INIT_MSG_INFOR_TYPE:
                msg_dict[bmp_cons.INIT_MSG_INFOR_TYPE[info_type]] = binascii.unhexlify(binascii.hexlify(info_value))
            else:
                msg_dict[info_type] = binascii.unhexlify(binascii.hexlify(info_value))
                LOG.info('unknow information type, type = %s' % info_type)
        LOG.info('initiation message = %s' % msg_dict)
        return msg_dict
    @staticmethod
    def parse_termination_msg(msg):
        """
        The termination message provides a way for a monitored router to
        indicate why it is terminating a session. Although use of this
        message is RECOMMENDED, a monitoring station must always be prepared
        for the session to terminate with no message. Once the router has
        sent a termination message, it MUST close the TCP session without
        sending any further messages. Likewise, the monitoring station MUST
        close the TCP session after receiving a termination message.
        The termination message consists of the common BMP header followed by
        one or more TLVs containing information about the reason for the
        termination, as follows:

        :param msg: raw bytes of the TLV section
        :return: dict mapping information-type name (or unknown code) to value
        """
        # 0 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Information Type | Information Length |
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        # | Information (variable) |
        # ~ ~
        # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        LOG.info('decode termination message')
        msg_dict = {}
        while msg:
            info_type, length = struct.unpack('!HH', msg[0:4])
            info_value = msg[4: 4 + length]
            msg = msg[4 + length:]
            if info_type in bmp_cons.TERMI_MSG_INFOR_TYPE:
                msg_dict[bmp_cons.TERMI_MSG_INFOR_TYPE[info_type]] = binascii.unhexlify(binascii.hexlify(info_value))
            else:
                msg_dict[info_type] = binascii.unhexlify(binascii.hexlify(info_value))
                LOG.info('unknow information type, type = %s' % info_type)
        LOG.info('termination message = %s' % msg_dict)
        return msg_dict
    def consume(self):
        """
        Dispatch ``self.raw_body`` to the decoder matching ``self.msg_type``.

        :return: ``(per_peer_header_or_None, decoded)`` on success; ``None``
                 when a per-peer message fails to decode, or for message
                 types this class does not handle.
        """
        if self.msg_type in [0, 1, 2, 3, 6]:
            # These types carry a 42-byte per-peer header before the body.
            try:
                per_peer_header = self.parse_per_peer_header(self.raw_body[0:42])
                self.msg_body = self.raw_body[42:]
                if self.msg_type == 0:
                    return per_peer_header, self.parse_route_monitoring_msg(self.msg_body)
                elif self.msg_type == 1:
                    return per_peer_header, self.parse_statistic_report_msg(self.msg_body)
                elif self.msg_type == 2:
                    return per_peer_header, self.parse_peer_down_notification(self.msg_body)
                elif self.msg_type == 3:
                    return per_peer_header, self.parse_peer_up_notification(self.msg_body, per_peer_header['flags'])
                elif self.msg_type == 6:
                    return per_peer_header, self.parse_route_mirroring_msg(self.msg_body)
            except Exception as e:
                LOG.error(e)
                error_str = traceback.format_exc()
                LOG.debug(error_str)
                # can not decode this BMP message
                return None
        elif self.msg_type == 4:
            return None, self.parse_initiation_msg(self.raw_body)
        elif self.msg_type == 5:
            return None, self.parse_termination_msg(self.raw_body)
| |
#!/usr/bin/env python
# Copyright (c) 2015, Robot Control and Pattern Recognition Group,
# Institute of Control and Computation Engineering
# Warsaw University of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Warsaw University of Technology nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYright HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Dawid Seredynski
#
import roslib
roslib.load_manifest('velma_scripts')
import rospy
import tf
from std_msgs.msg import *
from sensor_msgs.msg import *
from geometry_msgs.msg import *
from visualization_msgs.msg import *
import tf
from tf import *
from tf.transformations import *
import tf_conversions.posemath as pm
from tf2_msgs.msg import *
import PyKDL
import math
import numpy as np
import copy
import matplotlib.pyplot as plt
import thread
import random
import openravepy
from openravepy import *
from optparse import OptionParser
from openravepy.misc import OpenRAVEGlobalArguments
import itertools
import rospkg
import multiprocessing
import velmautils
from velma import Velma
import openraveinstance
import conversions as conv
import rrt_star_connect_planner
import tree
import rosparam
import tasks
class TestOrOctomap:
"""
"""
    def __init__(self):
        """Create the marker publisher used by this test harness.

        # NOTE(review): ``velmautils.MarkerPublisher`` is project-local;
        # presumably it publishes visualization_msgs markers -- confirm.
        """
        self.pub_marker = velmautils.MarkerPublisher()
def spin(self):
simulation = True
rospack = rospkg.RosPack()
env_file=rospack.get_path('velma_scripts') + '/data/key/vis_test.env.xml'
xacro_uri=rospack.get_path('velma_description') + '/robots/velma.urdf.xacro'
srdf_path=rospack.get_path('velma_description') + '/robots/'
rrt = rrt_star_connect_planner.PlannerRRT(3, env_file, xacro_uri, srdf_path)
print "creating interface for Velma..."
# create the interface for Velma robot
self.velma = Velma()
print "done."
rospy.sleep(0.5)
self.velma.updateTransformations()
if simulation:
hv = [3.2, 3.2, 3.2, 3.2]
ht = [3000, 3000, 3000, 3000]
self.velma.moveHandLeft([120.0/180.0*math.pi, 120.0/180.0*math.pi, 120.0/180.0*math.pi, 0], hv, ht, 5000, True)
self.velma.moveHandRight([120.0/180.0*math.pi, 120.0/180.0*math.pi, 120.0/180.0*math.pi, 0], hv, ht, 5000, True)
rospy.sleep(1.0)
#
# Initialise Openrave
#
openrave = openraveinstance.OpenraveInstance()
openrave.startOpenraveURDF(env_file=env_file)
openrave.readRobot(xacro_uri=xacro_uri, srdf_path=srdf_path)
rrt.waitForInit()
openrave.setCamera(PyKDL.Vector(2.0, 0.0, 2.0), PyKDL.Vector(0.60, 0.0, 1.10))
openrave.updateRobotConfigurationRos(self.velma.js_pos)
# TEST: key hole goal
if False:
task = KeyRotTaskRRT(openrave)
task.SampleGoal(None, None)
exit(0)
# TEST: head IK
if False:
v_rot = 0.800
v_lean = 0.375
v_head = 0.392
h_cam = 0.0
v_cam = 0.225
head_kin = headkinematics.HeadKinematics(v_rot, v_lean, v_head, h_cam, v_cam)
openrave.addSphere("target_sphere", 0.05)
while not rospy.is_shutdown():
head_kin.UpdateTorsoPose(openrave.robot_rave.GetJoint("torso_0_joint").GetValue(0), openrave.robot_rave.GetJoint("torso_1_joint").GetValue(0))
target_pos = PyKDL.Vector(1, random.uniform(-1.5, 1.5), random.uniform(0,2))
head_kin.UpdateTargetPosition(target_pos.x(), target_pos.y(), target_pos.z())
openrave.updatePose("target_sphere", PyKDL.Frame(target_pos))
head_kin.TransformTargetToHeadFrame()
joint_pan, joint_tilt = head_kin.CalculateHeadPose()
if joint_pan == None:
continue
openrave.robot_rave.SetDOFValues([joint_pan, joint_tilt], [openrave.robot_rave.GetJoint("head_pan_joint").GetDOFIndex(), openrave.robot_rave.GetJoint("head_tilt_joint").GetDOFIndex()])
raw_input("Press ENTER to continue...")
exit(0)
raw_input("Press ENTER to continue...")
# T_B_E_list = [PyKDL.Frame(PyKDL.Rotation.RotY(90.0/180.0*math.pi), PyKDL.Vector(0.6, 0, 1.6))]
# path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.GraspTaskRRT, ("right", T_B_E_list), 30.0)
path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.KeyRotTaskRRT, ("right",), 30.0)
# path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.LooAtTaskRRT, ("right",), 60.0)
# path, dof_names = rrt.RRTstar(openrave.robot_rave.GetDOFValues(), tasks.MoveArmsCloseTaskRRT, ("right",), 30.0)
traj = []
for i in range(len(path)-1):
q1 = path[i]
q2 = path[i+1]
for f in np.linspace(0.0, 1.0, 40):
traj.append( q1 * (1.0 - f) + q2 * f )
while True:
if raw_input("Type e to exit") == 'e':
break
openrave.showTrajectory(dof_names, 10.0, traj)
rrt.cleanup()
# raw_input("Press ENTER to exit...")
exit(0)
rospy.sleep(1)
openrave.runOctomap()
sphere = RaveCreateKinBody(openrave.env,'')
sphere.SetName("sphere")
sphere.InitFromSpheres(numpy.array([[0,0,0,0.05]]),True)
openrave.env.Add(sphere,True)
# test the collision checker for octomap
if True:
raw_input("Press ENTER to continue...")
ob = openrave.env.GetKinBody("_OCTOMAP_MAP_")
cc = openrave.env.GetCollisionChecker()
m_id = 0
for x in np.linspace(0,1.5,30):
for y in np.linspace(-1,1,40):
for z in np.linspace(1,2,20):
# print x,y,z
tr = conv.KDLToOpenrave(PyKDL.Frame(PyKDL.Vector(x,y,z)))
sphere.SetTransform(tr)
openrave.env.UpdatePublishedBodies()
report = CollisionReport()
ret = cc.CheckCollision(sphere, report)
# ret = openrave.env.CheckCollision(ob, report)
# print ret
if ret:
m_id = self.pub_marker.publishSinglePointMarker(PyKDL.Vector(x,y,z), m_id, r=1, g=0, b=0, a=1, namespace='default', frame_id='world', m_type=Marker.SPHERE, scale=Vector3(0.1, 0.1, 0.1), T=None)
continue
if report.plink1 == None:
print None
else:
print report.plink1.GetParent().GetName(), report.plink2.GetName()
# print " ", report.vLinkColliding
for link1, link2 in report.vLinkColliding:
print " ", link1.GetParent().GetName(), link2.GetName()
# print report.plink1.GetParent().GetName(), report.plink2.GetParent().GetName()
exit(0)
self.pub_head_look_at = rospy.Publisher("/head_lookat_pose", geometry_msgs.msg.Pose)
raw_input("Press ENTER to look around...")
# self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-0.5,1))))
# raw_input("Press ENTER to exit...")
# exit(0)
raw_input("Press ENTER to look around...")
self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,2))))
raw_input("Press ENTER to look around...")
self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,1,2))))
raw_input("Press ENTER to look around...")
self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,1,1.2))))
raw_input("Press ENTER to look around...")
self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,1.2))))
raw_input("Press ENTER to look around...")
self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-1,1.2))))
raw_input("Press ENTER to look around...")
self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(0.2,-1,2))))
raw_input("Press ENTER to look around...")
self.pub_head_look_at.publish(pm.toMsg(PyKDL.Frame(PyKDL.Vector(1,0,2))))
raw_input(".")
exit(0)
if __name__ == '__main__':
    # Standard ROS entry point: register the node, give ROS a moment to
    # establish connections, then run the interactive session.
    rospy.init_node('test_or_octomap')

    task = TestOrOctomap()
    rospy.sleep(1)
    task.spin()
| |
import os
from django.apps import apps
from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.utils.functional import cached_property
class Command(BaseCommand):
    """
    Copies or symlinks static files from different locations to the
    settings.STATIC_ROOT.
    """
    help = "Collect static files in a single location."
    requires_system_checks = False

    def __init__(self, *args, **kwargs):
        """Initialise bookkeeping state for a single collectstatic run."""
        super().__init__(*args, **kwargs)
        # Result lists filled in by copy_file()/link_file()/collect().
        self.copied_files = []
        self.symlinked_files = []
        self.unmodified_files = []
        self.post_processed_files = []
        # Destination storage (backed by settings.STATIC_ROOT by default).
        self.storage = staticfiles_storage
        self.style = no_style()

    @cached_property
    def local(self):
        # True when the destination storage is filesystem-backed: storages
        # without a local path raise NotImplementedError from .path().
        try:
            self.storage.path('')
        except NotImplementedError:
            return False
        return True

    def add_arguments(self, parser):
        """Register the collectstatic command-line options on ``parser``."""
        parser.add_argument(
            '--noinput', '--no-input', action='store_false', dest='interactive',
            help="Do NOT prompt the user for input of any kind.",
        )
        parser.add_argument(
            '--no-post-process', action='store_false', dest='post_process',
            help="Do NOT post process collected files.",
        )
        parser.add_argument(
            '-i', '--ignore', action='append', default=[],
            dest='ignore_patterns', metavar='PATTERN',
            help="Ignore files or directories matching this glob-style "
                 "pattern. Use multiple times to ignore more.",
        )
        parser.add_argument(
            '-n', '--dry-run', action='store_true',
            help="Do everything except modify the filesystem.",
        )
        parser.add_argument(
            '-c', '--clear', action='store_true',
            help="Clear the existing files using the storage "
                 "before trying to copy or link the original file.",
        )
        parser.add_argument(
            '-l', '--link', action='store_true',
            help="Create a symbolic link to each file instead of copying.",
        )
        parser.add_argument(
            '--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
            help="Don't ignore the common private glob-style patterns (defaults to 'CVS', '.*' and '*~').",
        )

    def set_options(self, **options):
        """
        Set instance variables based on an options dict
        """
        self.interactive = options['interactive']
        self.verbosity = options['verbosity']
        self.symlink = options['link']
        self.clear = options['clear']
        self.dry_run = options['dry_run']
        ignore_patterns = options['ignore_patterns']
        if options['use_default_ignore_patterns']:
            ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns
        # Normalise and de-duplicate the ignore patterns.
        self.ignore_patterns = list(set(os.path.normpath(p) for p in ignore_patterns))
        self.post_process = options['post_process']

    def collect(self):
        """
        Perform the bulk of the work of collectstatic.

        Split off from handle() to facilitate testing.

        Returns a dict of 'modified', 'unmodified' and 'post_processed' path
        lists.  Raises CommandError when --link is used with a non-local
        destination storage.
        """
        if self.symlink and not self.local:
            raise CommandError("Can't symlink to a remote destination.")

        if self.clear:
            self.clear_dir('')

        if self.symlink:
            handler = self.link_file
        else:
            handler = self.copy_file

        found_files = {}
        for finder in get_finders():
            for path, storage in finder.list(self.ignore_patterns):
                # Prefix the relative path if the source storage contains it
                if getattr(storage, 'prefix', None):
                    prefixed_path = os.path.join(storage.prefix, path)
                else:
                    prefixed_path = path

                if prefixed_path not in found_files:
                    # First finder wins: later files with the same destination
                    # path are ignored.
                    found_files[prefixed_path] = (storage, path)
                    handler(path, prefixed_path, storage)
                else:
                    self.log(
                        "Found another file with the destination path '%s'. It "
                        "will be ignored since only the first encountered file "
                        "is collected. If this is not what you want, make sure "
                        "every static file has a unique path." % prefixed_path,
                        level=1,
                    )

        # Storage backends may define a post_process() method.
        if self.post_process and hasattr(self.storage, 'post_process'):
            processor = self.storage.post_process(found_files,
                                                  dry_run=self.dry_run)
            for original_path, processed_path, processed in processor:
                if isinstance(processed, Exception):
                    self.stderr.write("Post-processing '%s' failed!" % original_path)
                    # Add a blank line before the traceback, otherwise it's
                    # too easy to miss the relevant part of the error message.
                    self.stderr.write("")
                    raise processed
                if processed:
                    self.log("Post-processed '%s' as '%s'" %
                             (original_path, processed_path), level=2)
                    self.post_processed_files.append(original_path)
                else:
                    self.log("Skipped post-processing '%s'" % original_path)

        return {
            'modified': self.copied_files + self.symlinked_files,
            'unmodified': self.unmodified_files,
            'post_processed': self.post_processed_files,
        }

    def handle(self, **options):
        """
        Entry point: confirm with the user when appropriate, run collect()
        and return a human-readable summary (only at verbosity >= 1).
        """
        self.set_options(**options)
        message = ['\n']
        if self.dry_run:
            message.append(
                'You have activated the --dry-run option so no files will be modified.\n\n'
            )

        message.append(
            'You have requested to collect static files at the destination\n'
            'location as specified in your settings'
        )

        if self.is_local_storage() and self.storage.location:
            destination_path = self.storage.location
            message.append(':\n\n %s\n\n' % destination_path)
            should_warn_user = (
                self.storage.exists(destination_path) and
                any(self.storage.listdir(destination_path))
            )
        else:
            destination_path = None
            message.append('.\n\n')
            # Destination files existence not checked; play it safe and warn.
            should_warn_user = True

        if self.interactive and should_warn_user:
            if self.clear:
                message.append('This will DELETE ALL FILES in this location!\n')
            else:
                message.append('This will overwrite existing files!\n')
            message.append(
                'Are you sure you want to do this?\n\n'
                "Type 'yes' to continue, or 'no' to cancel: "
            )
            if input(''.join(message)) != 'yes':
                raise CommandError("Collecting static files cancelled.")

        collected = self.collect()
        modified_count = len(collected['modified'])
        unmodified_count = len(collected['unmodified'])
        post_processed_count = len(collected['post_processed'])

        if self.verbosity >= 1:
            template = ("\n%(modified_count)s %(identifier)s %(action)s"
                        "%(destination)s%(unmodified)s%(post_processed)s.\n")
            summary = template % {
                'modified_count': modified_count,
                'identifier': 'static file' + ('' if modified_count == 1 else 's'),
                'action': 'symlinked' if self.symlink else 'copied',
                'destination': (" to '%s'" % destination_path if destination_path else ''),
                'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
                'post_processed': (collected['post_processed'] and
                                   ', %s post-processed'
                                   % post_processed_count or ''),
            }
            return summary

    def log(self, msg, level=2):
        """
        Small log helper
        """
        if self.verbosity >= level:
            self.stdout.write(msg)

    def is_local_storage(self):
        # Filesystem-backed destination storages support symlinks and a
        # meaningful .location attribute.
        return isinstance(self.storage, FileSystemStorage)

    def clear_dir(self, path):
        """
        Delete the given relative path using the destination storage backend.
        """
        if not self.storage.exists(path):
            return

        dirs, files = self.storage.listdir(path)
        for f in files:
            fpath = os.path.join(path, f)
            if self.dry_run:
                self.log("Pretending to delete '%s'" % fpath, level=1)
            else:
                self.log("Deleting '%s'" % fpath, level=1)
                try:
                    full_path = self.storage.path(fpath)
                except NotImplementedError:
                    self.storage.delete(fpath)
                else:
                    if not os.path.exists(full_path) and os.path.lexists(full_path):
                        # Delete broken symlinks
                        os.unlink(full_path)
                    else:
                        self.storage.delete(fpath)
        for d in dirs:
            # Recurse into subdirectories.
            self.clear_dir(os.path.join(path, d))

    def delete_file(self, path, prefixed_path, source_storage):
        """
        Check if the target file should be deleted if it already exists.

        Returns False when the existing target is up to date and may be kept
        (the caller should skip the file), True otherwise.
        """
        if self.storage.exists(prefixed_path):
            try:
                # When was the target file modified last time?
                target_last_modified = self.storage.get_modified_time(prefixed_path)
            except (OSError, NotImplementedError, AttributeError):
                # The storage doesn't support get_modified_time() or failed
                pass
            else:
                try:
                    # When was the source file modified last time?
                    source_last_modified = source_storage.get_modified_time(path)
                except (OSError, NotImplementedError, AttributeError):
                    pass
                else:
                    # The full path of the target file
                    if self.local:
                        full_path = self.storage.path(prefixed_path)
                        # If it's --link mode and the path isn't a link (i.e.
                        # the previous collectstatic wasn't with --link) or if
                        # it's non-link mode and the path is a link (i.e. the
                        # previous collectstatic was with --link), the old
                        # links/files must be deleted so it's not safe to skip
                        # unmodified files.
                        can_skip_unmodified_files = not (self.symlink ^ os.path.islink(full_path))
                    else:
                        # In remote storages, skipping is only based on the
                        # modified times since symlinks aren't relevant.
                        can_skip_unmodified_files = True
                    # Avoid sub-second precision (see #14665, #19540)
                    file_is_unmodified = (
                        target_last_modified.replace(microsecond=0) >=
                        source_last_modified.replace(microsecond=0)
                    )
                    if file_is_unmodified and can_skip_unmodified_files:
                        if prefixed_path not in self.unmodified_files:
                            self.unmodified_files.append(prefixed_path)
                        self.log("Skipping '%s' (not modified)" % path)
                        return False

            # Then delete the existing file if really needed
            if self.dry_run:
                self.log("Pretending to delete '%s'" % path)
            else:
                self.log("Deleting '%s'" % path)
                self.storage.delete(prefixed_path)
        return True

    def link_file(self, path, prefixed_path, source_storage):
        """
        Attempt to link ``path``
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.symlinked_files:
            return self.log("Skipping '%s' (already linked earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally link the file
        if self.dry_run:
            self.log("Pretending to link '%s'" % source_path, level=1)
        else:
            self.log("Linking '%s'" % source_path, level=2)
            full_path = self.storage.path(prefixed_path)
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
            try:
                # Replace any pre-existing file/link at the destination.
                if os.path.lexists(full_path):
                    os.unlink(full_path)
                os.symlink(source_path, full_path)
            except AttributeError:
                import platform
                raise CommandError("Symlinking is not supported by Python %s." %
                                   platform.python_version())
            except NotImplementedError:
                import platform
                raise CommandError("Symlinking is not supported in this "
                                   "platform (%s)." % platform.platform())
            except OSError as e:
                raise CommandError(e)
        if prefixed_path not in self.symlinked_files:
            self.symlinked_files.append(prefixed_path)

    def copy_file(self, path, prefixed_path, source_storage):
        """
        Attempt to copy ``path`` with storage
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.copied_files:
            return self.log("Skipping '%s' (already copied earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally start copying
        if self.dry_run:
            self.log("Pretending to copy '%s'" % source_path, level=1)
        else:
            self.log("Copying '%s'" % source_path, level=2)
            with source_storage.open(path) as source_file:
                self.storage.save(prefixed_path, source_file)
        self.copied_files.append(prefixed_path)
| |
from textwrap import dedent
from twisted.trial import unittest
from pymeta.runtime import ParseError, OMetaBase, EOFError, expected
from pymeta.boot import BootOMetaGrammar
from pymeta.builder import TreeBuilder, moduleFromGrammar
class HandyWrapper(object):
    """
    Convenient grammar wrapper for parsing strings.

    Attribute access returns a callable that parses a string with the
    wrapped grammar class using the attribute name as the rule name.
    """

    def __init__(self, klass):
        """
        @param klass: The grammar class to be wrapped.
        """
        self.klass = klass

    def __getattr__(self, name):
        """
        Return a function that will instantiate a grammar and invoke the named
        rule.
        @param: Rule name.
        """
        def invokeRule(inputString):
            """
            @param inputString: The string to be parsed by the wrapped grammar.
            """
            grammar = self.klass(inputString)
            result, error = grammar.apply(name)
            try:
                leftover, _ = grammar.input.head()
            except EOFError:
                # All input consumed -- the parse succeeded.
                pass
            else:
                # Unconsumed input remains: report the parse failure.
                raise error
            try:
                return ''.join(result)
            except TypeError:
                # Result is not a sequence of strings; return it unchanged.
                return result
        return invokeRule
class OMetaTestCase(unittest.TestCase):
    """
    Tests of OMeta grammar compilation.

    Grammars here use the OMeta v1 syntax (``rule ::= ...``, ``<apply>``,
    ``=>`` for semantic actions) and are compiled with the bootstrap grammar.
    """

    # Grammar-compiler class under test; V2TestCase overrides this.
    classTested = BootOMetaGrammar

    def compile(self, grammar):
        """
        Produce an object capable of parsing via this grammar.

        @param grammar: A string containing an OMeta grammar.
        @return: A HandyWrapper around the generated grammar class.
        """
        g = self.classTested(grammar)
        tree = g.parseGrammar('TestGrammar', TreeBuilder)
        result = moduleFromGrammar(tree, 'TestGrammar', OMetaBase, {})
        return HandyWrapper(result)

    def test_literals(self):
        """
        Input matches can be made on literal characters.
        """
        g = self.compile("digit ::= '1'")
        self.assertEqual(g.digit("1"), "1")
        self.assertRaises(ParseError, g.digit, "4")

    def test_multipleRules(self):
        """
        Grammars with more than one rule work properly.
        """
        g = self.compile("""
        digit ::= '1'
        aLetter ::= 'a'
        """)
        self.assertEqual(g.digit("1"), "1")
        self.assertRaises(ParseError, g.digit, "4")

    def test_escapedLiterals(self):
        """
        Input matches can be made on escaped literal characters.
        """
        g = self.compile(r"newline ::= '\n'")
        self.assertEqual(g.newline("\n"), "\n")

    def test_integers(self):
        """
        Input matches can be made on literal integers.
        """
        g = self.compile("stuff ::= 17 0x1F -2 0177")
        self.assertEqual(g.stuff([17, 0x1f, -2, 0o177]), 0o177)
        self.assertRaises(ParseError, g.stuff, [1, 2, 3])

    def test_star(self):
        """
        Input matches can be made on zero or more repetitions of a pattern.
        """
        g = self.compile("xs ::= 'x'*")
        self.assertEqual(g.xs(""), "")
        self.assertEqual(g.xs("x"), "x")
        self.assertEqual(g.xs("xxxx"), "xxxx")
        self.assertRaises(ParseError, g.xs, "xy")

    def test_plus(self):
        """
        Input matches can be made on one or more repetitions of a pattern.
        """
        g = self.compile("xs ::= 'x'+")
        self.assertEqual(g.xs("x"), "x")
        self.assertEqual(g.xs("xxxx"), "xxxx")
        self.assertRaises(ParseError, g.xs, "xy")
        self.assertRaises(ParseError, g.xs, "")

    def test_sequencing(self):
        """
        Input matches can be made on a sequence of patterns.
        """
        g = self.compile("twelve ::= '1' '2'")
        self.assertEqual(g.twelve("12"), "2");
        self.assertRaises(ParseError, g.twelve, "1")

    def test_alternatives(self):
        """
        Input matches can be made on one of a set of alternatives.
        """
        g = self.compile("digit ::= '0' | '1' | '2'")
        self.assertEqual(g.digit("0"), "0")
        self.assertEqual(g.digit("1"), "1")
        self.assertEqual(g.digit("2"), "2")
        self.assertRaises(ParseError, g.digit, "3")

    def test_optional(self):
        """
        Subpatterns can be made optional.
        """
        g = self.compile("foo ::= 'x' 'y'? 'z'")
        self.assertEqual(g.foo("xyz"), 'z')
        self.assertEqual(g.foo("xz"), 'z')

    def test_apply(self):
        """
        Other productions can be invoked from within a production.
        """
        g = self.compile("""
        digit ::= '0' | '1'
        bits ::= <digit>+
        """)
        self.assertEqual(g.bits('0110110'), '0110110')

    def test_negate(self):
        """
        Input can be matched based on its failure to match a pattern.
        """
        g = self.compile("foo ::= ~'0' <anything>")
        self.assertEqual(g.foo("1"), "1")
        self.assertRaises(ParseError, g.foo, "0")

    def test_ruleValue(self):
        """
        Productions can specify a Python expression that provides the result
        of the parse.
        """
        g = self.compile("foo ::= '1' => 7")
        self.assertEqual(g.foo('1'), 7)

    def test_ruleValueEscapeQuotes(self):
        """
        Escaped quotes are handled properly in Python expressions.
        """
        g = self.compile(r"""escapedChar ::= '\'' => '\\\''""")
        self.assertEqual(g.escapedChar("'"), "\\'")

    def test_ruleValueEscapeSlashes(self):
        """
        Escaped slashes are handled properly in Python expressions.
        """
        g = self.compile(r"""escapedChar ::= '\\' => '\\'""")
        self.assertEqual(g.escapedChar("\\"), "\\")

    def test_lookahead(self):
        """
        Doubled negation does lookahead.
        """
        g = self.compile("""
        foo ::= ~~(:x) <bar x>
        bar :x ::= :a :b ?(x == a == b) => x
        """)
        self.assertEqual(g.foo("11"), '1')
        self.assertEqual(g.foo("22"), '2')

    def test_binding(self):
        """
        The result of a parsing expression can be bound to a name.
        """
        g = self.compile("foo ::= '1':x => int(x) * 2")
        self.assertEqual(g.foo("1"), 2)

    def test_bindingAccess(self):
        """
        Bound names in a rule can be accessed on the grammar's "locals" dict.
        """
        gg = self.classTested("stuff ::= '1':a ('2':b | '3':c)")
        t = gg.parseGrammar('TestGrammar', TreeBuilder)
        G = moduleFromGrammar(t, 'TestGrammar', OMetaBase, {})
        g = G("12")
        self.assertEqual(g.apply("stuff")[0], '2')
        self.assertEqual(g.locals['stuff']['a'], '1')
        self.assertEqual(g.locals['stuff']['b'], '2')
        g = G("13")
        self.assertEqual(g.apply("stuff")[0], '3')
        self.assertEqual(g.locals['stuff']['a'], '1')
        self.assertEqual(g.locals['stuff']['c'], '3')

    def test_predicate(self):
        """
        Python expressions can be used to determine the success or failure of a
        parse.
        """
        g = self.compile("""
        digit ::= '0' | '1'
        double_bits ::= <digit>:a <digit>:b ?(a == b) => int(b)
        """)
        self.assertEqual(g.double_bits("00"), 0)
        self.assertEqual(g.double_bits("11"), 1)
        self.assertRaises(ParseError, g.double_bits, "10")
        self.assertRaises(ParseError, g.double_bits, "01")

    def test_parens(self):
        """
        Parens can be used to group subpatterns.
        """
        g = self.compile("foo ::= 'a' ('b' | 'c')")
        self.assertEqual(g.foo("ab"), "b")
        self.assertEqual(g.foo("ac"), "c")

    def test_action(self):
        """
        Python expressions can be run as actions with no effect on the result
        of the parse.
        """
        g = self.compile("""foo ::= ('1'*:ones !(False) !(ones.insert(0, '0')) => ''.join(ones))""")
        self.assertEqual(g.foo("111"), "0111")

    def test_bindNameOnly(self):
        """
        A pattern consisting of only a bind name matches a single element and
        binds it to that name.
        """
        g = self.compile("foo ::= '1' :x '2' => x")
        self.assertEqual(g.foo("132"), "3")

    def test_args(self):
        """
        Productions can take arguments.
        """
        g = self.compile("""
        digit ::= ('0' | '1' | '2'):d => int(d)
        foo :x :ignored ::= (?(x > 1) '9' | ?(x <= 1) '8'):d => int(d)
        baz ::= <digit>:a <foo a None>:b => [a, b]
        """)
        self.assertEqual(g.baz("18"), [1, 8])
        self.assertEqual(g.baz("08"), [0, 8])
        self.assertEqual(g.baz("29"), [2, 9])
        # Calling foo directly with no arguments on the stack must fail.
        self.assertRaises(ParseError, g.foo, "28")

    def test_patternMatch(self):
        """
        Productions can pattern-match on arguments.
        Also, multiple definitions of a rule can be done in sequence.
        """
        g = self.compile("""
        fact 0 => 1
        fact :n ::= <fact (n - 1)>:m => n * m
        """)
        self.assertEqual(g.fact([3]), 6)

    def test_listpattern(self):
        """
        Brackets can be used to match contents of lists.
        """
        g = self.compile("""
        digit ::= :x ?(x.isdigit()) => int(x)
        interp ::= [<digit>:x '+' <digit>:y] => x + y
        """)
        self.assertEqual(g.interp([['3', '+', '5']]), 8)

    def test_listpatternresult(self):
        """
        The result of a list pattern is the entire list.
        """
        g = self.compile("""
        digit ::= :x ?(x.isdigit()) => int(x)
        interp ::= [<digit>:x '+' <digit>:y]:z => (z, x + y)
        """)
        e = ['3', '+', '5']
        self.assertEqual(g.interp([e]), (e, 8))

    def test_recursion(self):
        """
        Rules can call themselves.
        """
        g = self.compile("""
        interp ::= (['+' <interp>:x <interp>:y] => x + y
                   | ['*' <interp>:x <interp>:y] => x * y
                   | :x ?(isinstance(x, str) and x.isdigit()) => int(x))
        """)
        self.assertEqual(g.interp([['+', '3', ['*', '5', '2']]]), 13)

    def test_leftrecursion(self):
        """
        Left-recursion is detected and compiled appropriately.
        """
        g = self.compile("""
        num ::= (<num>:n <digit>:d => n * 10 + d
               | <digit>)
        digit ::= :x ?(x.isdigit()) => int(x)
        """)
        self.assertEqual(g.num("3"), 3)
        self.assertEqual(g.num("32767"), 32767)

    def test_characterVsSequence(self):
        """
        Characters (in single-quotes) are not regarded as sequences.
        """
        g = self.compile("""
        interp ::= ([<interp>:x '+' <interp>:y] => x + y
                   | [<interp>:x '*' <interp>:y] => x * y
                   | :x ?(isinstance(x, basestring) and x.isdigit()) => int(x))
        """)
        self.assertEqual(g.interp([['3', '+', ['5', '*', '2']]]), 13)
        try:
            # u'' literals are a SyntaxError on Python 3.0-3.2 only.
            self.assertEqual(g.interp([[u'3', u'+', [u'5', u'*', u'2']]]), 13)
        except SyntaxError:
            # Python 3.0-3.2
            pass

    def test_string(self):
        """
        Strings in double quotes match string objects.
        """
        g = self.compile("""
        interp ::= ["Foo" 1 2] => 3
        """)
        self.assertEqual(g.interp([["Foo", 1, 2]]), 3)

    def test_argEscape(self):
        """
        Regression test for bug #239344.
        """
        g = self.compile("""
        memo_arg :arg ::= <anything> ?(False)
        trick ::= <letter> <memo_arg 'c'>
        broken ::= <trick> | <anything>*
        """)
        self.assertEqual(g.broken('ab'), 'ab')

    def test_comments(self):
        """
        Comments in grammars are accepted and ignored.
        """
        g = self.compile("""
        #comment here
        digit ::= ( '0' #second comment
                  | '1') #another one
        bits ::= <digit>+ #last one
        """)
        self.assertEqual(g.bits('0110110'), '0110110')
class V2TestCase(unittest.TestCase):
"""
Tests of OMeta2 grammar compilation.
"""
classTested = None
def setUp(self):
"""
Run the OMeta tests with the self-hosted grammar instead of the boot
one.
"""
#imported here to prevent OMetaGrammar from being constructed before
#tests are run
if self.classTested is None:
from pymeta.grammar import OMeta2Grammar
self.classTested = OMeta2Grammar
def compile(self, grammar):
"""
Produce an object capable of parsing via this grammar.
@param grammar: A string containing an OMeta grammar.
"""
g = self.classTested(dedent(grammar))
tree = g.parseGrammar('TestGrammar', TreeBuilder)
result = moduleFromGrammar(tree, 'TestGrammar', OMetaBase, {})
return HandyWrapper(result)
def test_literals(self):
"""
Input matches can be made on literal characters.
"""
g = self.compile("digit = '1'")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_multipleRules(self):
"""
Grammars with more than one rule work properly.
"""
g = self.compile("""
digit = '1'
aLetter = 'a'
""")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_escapedLiterals(self):
"""
Input matches can be made on escaped literal characters.
"""
g = self.compile(r"newline = '\n'")
self.assertEqual(g.newline("\n"), "\n")
def test_integers(self):
"""
Input matches can be made on literal integers.
"""
g = self.compile("stuff = 17 0x1F -2 0177")
self.assertEqual(g.stuff([17, 0x1f, -2, 0o177]), 0o177)
self.assertRaises(ParseError, g.stuff, [1, 2, 3])
def test_star(self):
"""
Input matches can be made on zero or more repetitions of a pattern.
"""
g = self.compile("xs = 'x'*")
self.assertEqual(g.xs(""), "")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
def test_plus(self):
"""
Input matches can be made on one or more repetitions of a pattern.
"""
g = self.compile("xs = 'x'+")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
self.assertRaises(ParseError, g.xs, "")
def test_sequencing(self):
"""
Input matches can be made on a sequence of patterns.
"""
g = self.compile("twelve = '1' '2'")
self.assertEqual(g.twelve("12"), "2");
self.assertRaises(ParseError, g.twelve, "1")
def test_alternatives(self):
"""
Input matches can be made on one of a set of alternatives.
"""
g = self.compile("digit = '0' | '1' | '2'")
self.assertEqual(g.digit("0"), "0")
self.assertEqual(g.digit("1"), "1")
self.assertEqual(g.digit("2"), "2")
self.assertRaises(ParseError, g.digit, "3")
def test_optional(self):
"""
Subpatterns can be made optional.
"""
g = self.compile("foo = 'x' 'y'? 'z'")
self.assertEqual(g.foo("xyz"), 'z')
self.assertEqual(g.foo("xz"), 'z')
def test_apply(self):
"""
Other productions can be invoked from within a production.
"""
g = self.compile("""
digit = '0' | '1'
bits = digit+
""")
self.assertEqual(g.bits('0110110'), '0110110')
def test_negate(self):
"""
Input can be matched based on its failure to match a pattern.
"""
g = self.compile("foo = ~'0' anything")
self.assertEqual(g.foo("1"), "1")
self.assertRaises(ParseError, g.foo, "0")
def test_ruleValue(self):
"""
Productions can specify a Python expression that provides the result
of the parse.
"""
g = self.compile("foo = '1' -> 7")
self.assertEqual(g.foo('1'), 7)
def test_lookahead(self):
"""
Doubled negation does lookahead.
"""
g = self.compile("""
foo = ~~(:x) bar(x)
bar :x = :a :b ?(x == a == b) -> x
""")
self.assertEqual(g.foo("11"), '1')
self.assertEqual(g.foo("22"), '2')
def test_binding(self):
"""
The result of a parsing expression can be bound to a name.
"""
g = self.compile("foo = '1':x -> int(x) * 2")
self.assertEqual(g.foo("1"), 2)
def test_bindingAccess(self):
"""
Bound names in a rule can be accessed on the grammar's "locals" dict.
"""
gg = self.classTested("stuff = '1':a ('2':b | '3':c)")
t = gg.parseGrammar('TestGrammar', TreeBuilder)
G = moduleFromGrammar(t, 'TestGrammar', OMetaBase, {})
g = G("12")
self.assertEqual(g.apply("stuff")[0], '2')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['b'], '2')
g = G("13")
self.assertEqual(g.apply("stuff")[0], '3')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['c'], '3')
def test_predicate(self):
"""
Python expressions can be used to determine the success or failure of a
parse.
"""
g = self.compile("""
digit = '0' | '1'
double_bits = digit:a digit:b ?(a == b) -> int(b)
""")
self.assertEqual(g.double_bits("00"), 0)
self.assertEqual(g.double_bits("11"), 1)
self.assertRaises(ParseError, g.double_bits, "10")
self.assertRaises(ParseError, g.double_bits, "01")
def test_parens(self):
"""
Parens can be used to group subpatterns.
"""
g = self.compile("foo = 'a' ('b' | 'c')")
self.assertEqual(g.foo("ab"), "b")
self.assertEqual(g.foo("ac"), "c")
def test_action(self):
"""
Python expressions can be run as actions with no effect on the result
of the parse.
"""
g = self.compile("""foo = ('1'*:ones !(False) !(ones.insert(0, '0')) -> ''.join(ones))""")
self.assertEqual(g.foo("111"), "0111")
def test_bindNameOnly(self):
"""
A pattern consisting of only a bind name matches a single element and
binds it to that name.
"""
g = self.compile("foo = '1' :x '2' -> x")
self.assertEqual(g.foo("132"), "3")
def test_args(self):
"""
Productions can take arguments.
"""
g = self.compile("""
digit = ('0' | '1' | '2'):d -> int(d)
foo :x = (?(x > 1) '9' | ?(x <= 1) '8'):d -> int(d)
baz = digit:a foo(a):b -> [a, b]
""")
self.assertEqual(g.baz("18"), [1, 8])
self.assertEqual(g.baz("08"), [0, 8])
self.assertEqual(g.baz("29"), [2, 9])
self.assertRaises(ParseError, g.foo, "28")
def test_patternMatch(self):
"""
Productions can pattern-match on arguments.
Also, multiple definitions of a rule can be done in sequence.
"""
g = self.compile("""
fact 0 -> 1
fact :n = fact((n - 1)):m -> n * m
""")
self.assertEqual(g.fact([3]), 6)
def test_listpattern(self):
"""
Brackets can be used to match contents of lists.
"""
g = self.compile("""
digit = :x ?(x.isdigit()) -> int(x)
interp = [digit:x '+' digit:y] -> x + y
""")
self.assertEqual(g.interp([['3', '+', '5']]), 8)
def test_listpatternresult(self):
"""
The result of a list pattern is the entire list.
"""
g = self.compile("""
digit = :x ?(x.isdigit()) -> int(x)
interp = [digit:x '+' digit:y]:z -> (z, x + y)
""")
e = ['3', '+', '5']
self.assertEqual(g.interp([e]), (e, 8))
def test_recursion(self):
    """
    Rules can call themselves.
    """
    grammar = """
interp = (['+' interp:x interp:y] -> x + y
| ['*' interp:x interp:y] -> x * y
| :x ?(isinstance(x, str) and x.isdigit()) -> int(x))
"""
    parser = self.compile(grammar)
    tree = ['+', '3', ['*', '5', '2']]
    self.assertEqual(parser.interp([tree]), 13)
def test_leftrecursion(self):
    """
    Left-recursion is detected and compiled appropriately.
    """
    grammar = """
num = (num:n digit:d -> n * 10 + d
| digit)
digit = :x ?(x.isdigit()) -> int(x)
"""
    parser = self.compile(grammar)
    for source, expected in (("3", 3), ("32767", 32767)):
        self.assertEqual(parser.num(source), expected)
def test_characterVsSequence(self):
    """
    Characters (in single-quotes) are not regarded as sequences.
    """
    # NOTE: 'basestring' in the grammar action exists only on Python 2.
    g = self.compile("""
interp = ([interp:x '+' interp:y] -> x + y
| [interp:x '*' interp:y] -> x * y
| :x ?(isinstance(x, basestring) and x.isdigit()) -> int(x))
""")
    self.assertEqual(g.interp([['3', '+', ['5', '*', '2']]]), 13)
    try:
        # NOTE(review): this repeats the assertion above inside a
        # try/except SyntaxError -- presumably guarding against Python
        # versions where the action source cannot compile. Confirm the
        # intent before simplifying; as written the except clause can
        # silently mask a failure on 3.0-3.2.
        self.assertEqual(g.interp([['3', '+', ['5', '*', '2']]]), 13)
    except SyntaxError:
        # Python 3.0-3.2
        pass
def test_string(self):
    """
    Strings in double quotes match string objects.
    """
    grammar = """
interp = ["Foo" 1 2] -> 3
"""
    parser = self.compile(grammar)
    result = parser.interp([["Foo", 1, 2]])
    self.assertEqual(result, 3)
def test_argEscape(self):
    """
    Regression test for bug #239344.
    """
    grammar = """
memo_arg :arg = anything ?(False)
trick = letter memo_arg('c')
broken = trick | anything*
"""
    parser = self.compile(grammar)
    self.assertEqual(parser.broken('ab'), 'ab')
class PyExtractorTest(unittest.TestCase):
    """
    Tests for finding Python expressions in OMeta grammars.
    """

    def findInGrammar(self, expr):
        """
        L{OMeta.pythonExpr()} can extract a single Python expression from a
        string, ignoring the text following it.
        """
        parser = OMetaBase(expr + "\nbaz ::= ...\n")
        extracted = parser.pythonExpr()[0][0]
        self.assertEqual(extracted, expr)

    def test_expressions(self):
        """
        L{OMeta.pythonExpr()} can recognize various paired delimiters properly
        and include newlines in expressions where appropriate.
        """
        samples = (
            "x",
            "(x + 1)",
            "{x: (y)}",
            "x, '('",
            'x, "("',
            'x, """("""',
            '(x +\n 1)',
            '[x, "]",\n 1]',
            '{x: "]",\ny: "["}',
        )
        for sample in samples:
            self.findInGrammar(sample)
        # Unbalanced delimiters must be rejected.
        for bad in ("foo(x[1]])\nbaz ::= ...\n", "foo(x[1]\nbaz ::= ...\n"):
            self.assertRaises(ParseError, OMetaBase(bad).pythonExpr)
class MakeGrammarTest(unittest.TestCase):
    """
    Test the definition of grammars via the 'makeGrammar' method.
    """

    def test_makeGrammar(self):
        """makeGrammar builds a usable grammar class from source + bindings."""
        # Imported here to prevent OMetaGrammar from being constructed
        # before tests are run.
        from pymeta.grammar import OMeta
        results = []
        grammar = """
digit ::= :x ?('0' <= x <= '9') => int(x)
num ::= (<num>:n <digit>:d !(results.append(True)) => n * 10 + d
| <digit>)
"""
        TestGrammar = OMeta.makeGrammar(grammar, {'results': results})
        g = TestGrammar("314159")
        self.assertEqual(g.apply("num")[0], 314159)
        # The !(...) action must have run at least once.
        self.assertNotEqual(len(results), 0)

    def test_brokenGrammar(self):
        """A malformed grammar raises ParseError carrying position/error info."""
        from pymeta.grammar import OMeta
        grammar = """
andHandler ::= <handler>:h1 'and' <handler>:h2 => And(h1, h2)
"""
        e = self.assertRaises(ParseError, OMeta.makeGrammar, grammar, {})
        # Fixed: assertEquals is a deprecated alias; the rest of this file
        # uses assertEqual.
        self.assertEqual(e.position, 39)
        self.assertEqual(e.error, [("expected", "token", "'")])

    def test_subclassing(self):
        """
        A subclass of an OMeta subclass should be able to call rules on its
        parent, and access variables in its scope.
        """
        from pymeta.grammar import OMeta
        grammar1 = """
dig ::= :x ?(a <= x <= b) => int(x)
"""
        TestGrammar1 = OMeta.makeGrammar(grammar1, {'a': '0', 'b': '9'})
        grammar2 = """
num ::= (<num>:n <dig>:d => n * base + d
| <dig>)
"""
        TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {'base': 10})
        g = TestGrammar2("314159")
        self.assertEqual(g.apply("num")[0], 314159)
        grammar3 = """
dig ::= :x ?(a <= x <= b or c <= x <= d) => int(x, base)
"""
        # Overriding 'base' while inheriting 'a' and 'b' from the parents.
        TestGrammar3 = TestGrammar2.makeGrammar(
            grammar3, {'c': 'a', 'd': 'f', 'base': 16})
        g = TestGrammar3("abc123")
        self.assertEqual(g.apply("num")[0], 11256099)

    def test_super(self):
        """
        Rules can call the implementation in a superclass.
        """
        from pymeta.grammar import OMeta
        grammar1 = "expr ::= <letter>"
        TestGrammar1 = OMeta.makeGrammar(grammar1, {})
        grammar2 = "expr ::= <super> | <digit>"
        TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {})
        self.assertEqual(TestGrammar2("x").apply("expr")[0], "x")
        self.assertEqual(TestGrammar2("3").apply("expr")[0], "3")
class SelfHostingTest(OMetaTestCase):
    """
    Tests for the OMeta grammar parser defined with OMeta.
    """

    classTested = None

    def setUp(self):
        """
        Run the OMeta tests with the self-hosted grammar instead of the boot
        one.
        """
        if self.classTested is not None:
            return
        # Deferred import: constructing OMetaGrammar at module load time
        # would happen before the test run begins.
        from pymeta.grammar import OMetaGrammar
        self.classTested = OMetaGrammar
class NullOptimizerTest(OMetaTestCase):
    """
    Tests of OMeta grammar compilation via the null optimizer.
    """

    def compile(self, grammar):
        """
        Produce an object capable of parsing via this grammar.
        @param grammar: A string containing an OMeta grammar.
        """
        from pymeta.grammar import OMetaGrammar, NullOptimizer
        parsed = OMetaGrammar(grammar).parseGrammar('TestGrammar', TreeBuilder)
        optimizer = NullOptimizer([parsed])
        optimizer.builder = TreeBuilder("TestGrammar", optimizer)
        optimized_tree, _err = optimizer.apply("grammar")
        grammar_class = moduleFromGrammar(optimized_tree, 'TestGrammar',
                                          OMetaBase, {})
        return HandyWrapper(grammar_class)
class ErrorReportingTests(unittest.TestCase):
    # Tests for ParseError's raw position/expected reporting and its
    # human-readable formatError() rendering.

    def compile(self, grammar):
        """
        Produce an object capable of parsing via this grammar.
        @param grammar: A string containing an OMeta grammar.
        """
        g = BootOMetaGrammar(grammar)
        tree = g.parseGrammar('TestGrammar', TreeBuilder)
        result = moduleFromGrammar(tree, 'TestGrammar', OMetaBase, {})
        return HandyWrapper(result)

    def test_rawReporting(self):
        """
        Errors from parsing contain enough info to figure out what was
        expected and where.
        """
        g = self.compile("""
start ::= ( (<person> <feeling> <target>)
| (<adjective> <animal> <feeling> <token "some"> <target>))
adjective ::= <token "crazy"> | <token "clever"> | <token "awesome">
feeling ::= <token "likes"> | <token "loves"> | <token "hates">
animal ::= <token "monkey"> | <token "horse"> | <token "unicorn">
person ::= <token "crazy horse"> | <token "hacker">
target ::= (<token "bananas"> | <token "robots"> | <token "americans">
| <token "bacon">)
""")
        # some warmup -- these inputs all parse successfully.
        g.start("clever monkey hates some robots")
        g.start("awesome unicorn loves some bacon")
        g.start("crazy horse hates americans")
        g.start("hacker likes robots")
        e = self.assertRaises(ParseError, g.start,
                              "clever hacker likes bacon")
        self.assertEqual(e.position, 8)
        self.assertEqual(e.error, [('expected', "token", "horse")])
        e = self.assertRaises(ParseError, g.start,
                              "crazy horse likes some grass")
        # matching "some" means second branch of 'start' is taken
        self.assertEqual(e.position, 23)
        self.assertEqual(e.error, [('expected', "token", "bananas"),
                                   ('expected', 'token', "bacon"),
                                   ('expected', "token", "robots"),
                                   ('expected', "token", "americans")])
        e = self.assertRaises(ParseError, g.start,
                              "crazy horse likes mountains")
        # no "some" means first branch of 'start' is taken...
        # but second is also viable
        self.assertEqual(e.position, 18)
        self.assertEqual(e.error, [('expected', "token", "some"),
                                   ('expected', "token", "bananas"),
                                   ('expected', 'token', "bacon"),
                                   ('expected', "token", "robots"),
                                   ('expected', "token", "americans")])

    def test_formattedReporting(self):
        """
        Parse errors can be formatted into a nice human-readable view
        containing the erroneous input and possible fixes.
        """
        g = self.compile("""
dig ::= '1' | '2' | '3'
bits ::= <dig>+
""")
        # NOTE(review): whitespace inside the dedent() literals below (in
        # particular the spaces aligning the '^' caret under the offending
        # column) appears to have been lost in reformatting -- verify the
        # exact expected strings against the upstream file.
        input = "123x321"
        e = self.assertRaises(ParseError, g.bits, input)
        self.assertEqual(e.formatError(input),
                         dedent("""
123x321
^
Parse error at line 1, column 3: expected one of '1', '3', or '2'
"""))
        input = "foo\nbaz\nboz\ncharlie\nbuz"
        e = ParseError(12, expected('token', 'foo') + expected(None, 'b'))
        self.assertEqual(e.formatError(input),
                         dedent("""
charlie
^
Parse error at line 4, column 0: expected one of token 'foo', or 'b'
"""))
        input = '123x321'
        e = ParseError(3, expected('digit'))
        self.assertEqual(e.formatError(input),
                         dedent("""
123x321
^
Parse error at line 1, column 3: expected a digit
"""))
| |
#!/usr/bin/python -tt
'''
File: non_comp_detect.py
Date: August 21, 2014
Description: this script reads in the parameters of compositional
combiner functions and word vectors, and for a given POS-tagged
corpus, computes phrasal representations for the phrases in the corpus.
It also compares these phrasal representations to a list of externally
computed "distances" between directly learned word2vec representations
for the phrases (the phrases can be output with the -v flag, and distances
to word2vec phrases can be computed with analysis/compute_distances.py),
and outputs the correlation with these distances.
'''
import sys, commands, string, getopt, math, gzip, re
import numpy as np
import scipy as sp
import multiprocessing as mp
import extract_training
from compute_composed import *
'''
Used to hold the most frequent words (as determined by an external
tool, e.g., SRILM or inbuilt unix tools) and for stop-word filtering.
'''
class Unigrams:
    """
    Holds unigram relative frequencies read from a count file, plus the
    top-N most frequent words (excluding sentence markers) used for
    stop-word filtering.
    """

    def __init__(self, filename, numStop):
        """
        filename: TSV file of "word<TAB>count" lines, assumed sorted most
        frequent first so the head of the file yields the stop words.
        numStop: number of leading lines to harvest stop words from
        (0 disables stop-word filtering).
        """
        self.stopwords = []
        self.numStop = numStop
        self.unigrams = {}
        counter = 0
        # Fixed: the file handle was previously never closed; 'with'
        # guarantees closure. Text mode ('r' instead of 'rb') since the
        # content is parsed as text.
        with open(filename, 'r') as fh:
            for line in fh:
                elements = line.strip().split('\t')
                self.unigrams[elements[0]] = float(elements[1])
                # Only the first numStop *lines* are candidates; sentence
                # markers within them are skipped (and consume a slot, as
                # in the original logic).
                if counter < self.numStop and elements[0] != "<s>" and elements[0] != "</s>":
                    self.stopwords.append(elements[0])
                counter += 1
        # Normalize raw counts into relative frequencies.
        normalizer = sum(self.unigrams.values())
        for key, value in self.unigrams.items():
            self.unigrams[key] = value / normalizer

    def checkStopWord(self, word):
        """Return True iff word is one of the collected stop words."""
        return word in self.stopwords
'''
log likelihood-based scorer
'''
def scoreSkipGram(context, phraseRep, model):
    """
    Log likelihood-based scorer: sum of inner products between the phrase
    representation and the context vector of each context word that the
    model knows about.
    """
    total = 0.0
    vecs = model.contextVecs
    for token in context:
        if token in vecs:
            total += np.dot(vecs[token], phraseRep)
    return total
'''
cosine similarity-based scorer; does not take into account context
'''
def scoreCosineSim(phrase, phraseRep, model, alpha):
    """
    Cosine similarity-based scorer; does not take the context into
    account. Phrases longer than two words weight every word equally;
    bigrams interpolate the two similarities with weight alpha.
    NOTE(review): a one-word phrase would fall into the bigram branch and
    fail on words[1] -- callers appear to pass multi-word phrases only.
    """
    def cos(word_vec):
        return np.divide(np.dot(word_vec, phraseRep),
                         np.linalg.norm(word_vec) * np.linalg.norm(phraseRep))

    words = phrase.split()
    if len(words) > 2:  # equal weighting over all words
        return sum(cos(model.wordVecs[w]) for w in words) / len(words)
    # Length 2: variable weighting between the two words.
    sim1 = cos(model.wordVecs[words[0]])
    sim2 = cos(model.wordVecs[words[1]])
    return alpha * sim1 + (1 - alpha) * sim2
'''
macro for function below
'''
def checkArity(rule):
    """Return the number of nonterminal slots ([...]) in a rule string."""
    nonterminals = re.findall(r'\[([^]]+)\]', rule)
    return len(nonterminals)
'''
used for extracting phrases that we want to score for a given sentence (arg: filehandle to .gz per-sentence grammar file)
'''
def processRules(grammar_fh, distanceCorr):
    """
    Extract the unique, purely lexical (arity-0) source phrases to score
    from a per-sentence grammar.

    grammar_fh: iterable of SCFG rule lines ("lhs ||| src ||| ...").
    distanceCorr: when True, keep only bigrams (for correlating against
    externally computed word2vec distances); otherwise keep any
    multi-word source side.
    Returns the kept phrases in first-seen order.
    """
    seen_rules = set()  # set instead of list: O(1) duplicate check
    preterm_rules = []
    for rule in grammar_fh:
        src_rule = rule.strip().split(' ||| ')[1]
        if src_rule in seen_rules:
            continue
        seen_rules.add(src_rule)
        if checkArity(src_rule) != 0:
            continue  # rule contains nonterminals; skip
        num_words = len(src_rule.split())
        if (num_words == 2) if distanceCorr else (num_words > 1):
            preterm_rules.append(src_rule)
    return preterm_rules
'''
extracts context from a sentence. Optionally filters for stop words in the context.
'''
def extractContext(words, start_idx, end_idx, context_size, model, stopW):
    """
    Collect up to context_size context words on each side of the span
    [start_idx, end_idx). Words without a context vector are skipped, and
    when stop-word filtering is enabled (stopW.numStop > 0) stop words are
    skipped too. The left context is returned nearest-word-first.
    """
    def usable(word):
        # A context word must have a context vector and, when filtering is
        # on, must not be a stop word.
        if word not in model.contextVecs:
            return False
        return not (stopW.numStop > 0 and stopW.checkStopWord(word))

    def collect(indices):
        ctx = []
        for idx in indices:
            if usable(words[idx]):
                ctx.append(words[idx])
            if len(ctx) == context_size:
                break
        return ctx

    left_context = collect(range(start_idx - 1, -1, -1))
    right_context = collect(range(end_idx, len(words)))
    return left_context + right_context
'''
simple function to extract the indices of a subsequence given a sequence.
This is used so that we can extract POS tags for a given word sequence
from a word-POS sentence.
'''
def containsSequence(subseq, seq):
    # Naive O(len(seq) * len(subseq)) subsequence search. Returns the
    # (start, end) indices of the first occurrence of subseq inside seq
    # (end exclusive), or (-1, -1) when absent.
    # NOTE: xrange is Python 2 only, consistent with the rest of this file.
    for i in xrange(len(seq)-len(subseq)+1):
        for j in xrange(len(subseq)):
            if seq[i+j] != subseq[j]:
                break
        else:
            # for/else: runs only when the inner loop did NOT break,
            # i.e. every element of subseq matched at offset i.
            return i, i+len(subseq)
    return -1, -1
'''
this variant of the normalizer function is called when we are dealing with a thread
'''
def computeNormalizerThread(model, phrase_tuples, uniModel, uniCorrection, out_q):
    # Worker-process body: normalizes the raw skip-gram score of each
    # phrase tuple in its chunk and puts the revised chunk on out_q.
    revised_tuples = []
    for phrase, phrase_pos, context, phraseRep, score, sentNum in phrase_tuples:
        # Partition function over the whole context vocabulary; the same
        # normalizer applies once per context word of this phrase.
        normalizer = 0
        for word in model.contextVecs: #guaranteed that word is in context because this criterion is checked in extractContext
            contextVec = model.contextVecs[word]
            normalizer += math.exp(np.dot(contextVec, phraseRep))
        normalized_score = score - len(context)*math.log(normalizer)
        if uniCorrection:
            # Subtract the unigram log-probability of the context words;
            # words missing from the unigram model contribute nothing.
            uniLogProb = 0
            for word in context:
                if word in uniModel.unigrams:
                    uniLogProb += math.log(uniModel.unigrams[word])
            normalized_score -= uniLogProb
        revised_tuples.append((phrase, phrase_pos, context, phraseRep, normalized_score, sentNum))
    out_q.put(revised_tuples)
'''
controller/wrapper function for computeNormalizerThread
'''
def computeNormalizerParallel(phrase_tuples, numJobs, model, uniModel, uniCorrection):
    # Controller/wrapper for computeNormalizerThread: splits the tuples
    # into numJobs contiguous chunks, one worker process per chunk, and
    # gathers the normalized results from a shared queue.
    out_q = mp.Queue()
    procs = []
    revised_tuples = []
    chunksize = int(math.floor(len(phrase_tuples) / float(numJobs)))
    for proc in range(numJobs):
        # The last worker takes whatever remains after integer chunking.
        end = len(phrase_tuples) if proc == numJobs-1 else (proc+1)*chunksize
        tuples_proc = phrase_tuples[chunksize*proc:end]
        p = mp.Process(target=computeNormalizerThread, args=(model, tuples_proc, uniModel, uniCorrection, out_q))
        procs.append(p)
        p.start()
    # Drain the queue before join(): a worker blocked writing a large
    # result to the queue cannot exit, so joining first could deadlock.
    # Note the output order follows queue arrival, not input order.
    for proc in range(numJobs):
        revised_tuples += out_q.get()
    for p in procs:
        p.join()
    return revised_tuples
'''
single-thread version of computing normalizer; deprecated
'''
def computeNormalizer(model, phraseRep, numJobs):
    """
    Single-thread partition-function computation (deprecated in favour of
    computeNormalizerParallel). numJobs is unused; kept for interface
    compatibility.
    """
    vecs = model.contextVecs
    return sum(math.exp(np.dot(vecs[word], phraseRep)) for word in vecs)
'''
function that goes through a corpus, extracts relevant phrases from the per-sentence grammars,
and scores each of those phrases in its appropriate context by first computing the phrasal
representation and then calling the scorer.
The function can also be used to just print the relevant vectors.
'''
def scoreSegmentations(model, uniModel, numContext, grammar_loc, printOnly, cosine, distanceCorr, writePSG):
    # Walks the corpus on stdin, extracts candidate phrases (from the
    # per-sentence grammars when writePSG, otherwise from the input line
    # itself), composes a phrasal representation for each, and scores it
    # in context. Returns (phrase, POS, context, rep, score, sentence#)
    # tuples; when printOnly, just prints each vector instead.
    line_counter = 0
    phrase_tuples = []
    for line in sys.stdin: #each line is a POS-tagged sentence (sequence of word#POS pairs)
        phrase_context = None
        if not writePSG:
            # Input format here is "phrase ||| tagged-sentence".
            phrase_context = line.strip().split(' ||| ')
        phrases = processRules(gzip.open(grammar_loc + "grammar.%d.gz"%line_counter, 'rb'), distanceCorr) if writePSG else [phrase_context[0]]
        words, pos_tags = zip(*[word_pos.split('#') for word_pos in line.strip().split()]) if writePSG else zip(*[word_pos.split('#') for word_pos in phrase_context[1].split()])
        for phrase in phrases: #phrase can have NTs in it
            if model.checkVocab(phrase):
                phrase_words = phrase.split()
                # Locate the phrase in the sentence to recover its POS span.
                start, end = containsSequence(phrase_words, words) #if we can get this information from the PSG, then it would be much easier
                if start > -1 and end > -1:
                    phrase_pos = [extract_training.collapsePOS(pos) for pos in pos_tags[start:end]]
                    phraseRep = model.computeComposedRep(phrase, ' '.join(phrase_pos))
                    if printOnly:
                        model.printVector('_'.join(phrase_words), phraseRep)
                    else:
                        context = extractContext(words, start, end, numContext, model, uniModel)
                        # cosine >= 0 selects the cosine scorer (alpha =
                        # cosine); otherwise the skip-gram scorer is used.
                        score = scoreCosineSim(phrase, phraseRep, model, cosine) if cosine >= 0 else scoreSkipGram(context, phraseRep, model)
                        phrase_tuples.append((phrase, ' '.join(phrase_pos), context, phraseRep, score, line_counter))
        line_counter += 1
    return phrase_tuples
'''
prints the relevant scores and distances out
'''
def printScoresAndDistances(revised_tuples, model, numContext, averaging, perplexity, distanceDict, printFullOnly, printPOSOnly):
    # Prints per-phrase scores next to externally computed word2vec
    # distances, and accumulates (score, distance) pairs per POS pattern.
    # Returns (pos_scores_dist, scores, distances) for downstream
    # correlation computation.
    scores = []
    distances = []
    pos_scores_dist = {}
    for phrase, phrase_pos, context, phraseRep, score, sentNum in revised_tuples:
        numWordsInContext = len(context)
        if perplexity:
            # Perplexity-style rescaling of the (negative) log score.
            score = math.exp(-score / numWordsInContext)
            scores.append(score)
        elif averaging:
            score /= numWordsInContext
            scores.append(-score)
        else:
            scores.append(-score)
        # -1 marks phrases without an externally computed distance.
        distance = distanceDict[phrase] if phrase in distanceDict else -1
        distances.append(distance)
        pos_list = pos_scores_dist[phrase_pos] if phrase_pos in pos_scores_dist else []
        pos_list.append((scores[-1], distances[-1]))
        pos_scores_dist[phrase_pos] = pos_list
        if not printPOSOnly:
            # NOTE(review): nesting reconstructed -- with -f only phrases
            # whose context is complete (len == numContext) are printed;
            # otherwise every phrase is printed. Confirm against upstream.
            if printFullOnly:
                if numWordsInContext == numContext:
                    print "%s\t%s\t%.3f\t%.3f \t%s"%(phrase, phrase_pos, score, distance, ' '.join(context))
            else:
                print "%s\t%s\t%.3f\t%.3f \t%s"%(phrase, phrase_pos, score, distance, ' '.join(context))
    return pos_scores_dist, scores, distances
def printPOSInfo(pos_scores_dist):
    # For every POS pattern with at least two samples, prints the sample
    # count, the Pearson correlation between model scores and word2vec
    # distances, and the mean distance.
    for pos_pair in pos_scores_dist:
        pos_scores, pos_distances = zip(*pos_scores_dist[pos_pair])
        if len(pos_scores) > 1:
            # pearsonr needs >= 2 points; singleton patterns are skipped.
            pos_coeff, pos_pval = sp.stats.stats.pearsonr(pos_scores, pos_distances)
            print "%s\t%d\t%.3f\t%.3f"%(pos_pair, len(pos_scores), pos_coeff, sum(pos_distances) / len(pos_distances))
'''
for MT integration, so that the non-compositionality scores can be used downstream
in MT decoding.
To Do: add optionality handling for baseline generation (no SegScore, just SegOn or NoSeg)
'''
def writePerSentenceGrammar(loc_in, loc_out, phrase_tuples, printComposed, averaging, perplexity, binning, numBins, featNT):
    # MT integration: re-emits every per-sentence grammar from loc_in into
    # loc_out with non-compositionality features (SegScore/SegOn, or
    # NoSeg when no phrase of the rule was scored) appended to the
    # feature field. NOTE(review): block nesting was reconstructed from
    # mangled indentation -- verify against upstream before relying on it.
    sentDict = {}
    # Dimensionality of the composed representations (for Dimension* feats).
    vecSize = len(phrase_tuples[0][3])
    for phrase, phrase_pos, context, phraseRep, score, sentNum in phrase_tuples: #input data to write out is in these tuples; we need to reformat
        phraseDict = sentDict[sentNum] if sentNum in sentDict else {}
        if len(context) > 0:
            if perplexity:
                score = math.exp(-score / len(context))
                score /= 100000 #just for feature scaling purposes
            elif averaging:
                score /= len(context)
        phraseDict[phrase] = score if not printComposed else (score, phraseRep)
        sentDict[sentNum] = phraseDict
    if binning != "": #binning the values if asked
        for sentNum in sentDict:
            phraseDict = sentDict[sentNum]
            phrase_scores = phraseDict.items()
            # NOTE(review): in the printComposed case key_val[1] is a
            # (score, rep) tuple, so key_val[1][1] extracts the *vector*,
            # not the score -- binning then operates on vectors. This
            # looks like a bug (key_val[1][0] was probably intended);
            # confirm before fixing.
            scores = [key_val[1] for key_val in phrase_scores] if not printComposed else [key_val[1][1] for key_val in phrase_scores]
            bins = []
            if binning == "width": #compute the bin start and end values using numpy histogram
                histo, bins = np.histogram(scores, numBins)
            else: #can only be size since binning != ""; figure out bin widths based on equal size (elements per bin)
                numElementsPerBin = float(len(scores)) / numBins
                probs = np.array(range(numBins+1))*numElementsPerBin
                probs /= float(len(scores)) #do this based on probabilities;
                bins = sp.stats.mstats.mquantiles(scores, probs)
            binned_scores = np.digitize(scores, bins)
            for idx, key in enumerate(phrase_scores): #bin the values
                phrase = key[0]
                score = binned_scores[idx]
                if score > numBins:
                    # digitize can return numBins+1 for the max value.
                    score = numBins
                phraseDict[phrase] = score if not printComposed else (score, key[1][1])
            sentDict[sentNum] = phraseDict
    numSentences = max(sentDict.keys())
    for line_counter in xrange(numSentences+1):
        grammar_fh = gzip.open(loc_in+"grammar.%d.gz"%line_counter, 'rb')
        out_fh = gzip.open(loc_out+"grammar.%d.gz"%line_counter, 'w')
        if line_counter not in sentDict: #to handle sentences where we do not extract rule with any phrase longer than length 1 - just copy and paste the original rule with NoSeg feature on
            for rule in grammar_fh:
                elements = rule.strip().split(' ||| ')
                features = elements[3]
                features += " NoSeg=1"
                arrayToPrint = elements[:3] + [features] + elements[4:]
                lineToPrint = ' ||| '.join(arrayToPrint)
                out_fh.write("%s\n"%lineToPrint)
            grammar_fh.close()
            out_fh.close()
        else:
            phraseDict = sentDict[line_counter]
            for rule in grammar_fh:
                elements = rule.strip().split(' ||| ')
                src_rule = elements[1]
                features = elements[3]
                if featNT: #featurize NTs
                    # Rules with nonterminals: average the scores of their
                    # scored lexical sub-phrases.
                    subPhrases = re.split(r'\[(?:[^]]+)\]', src_rule) #split rule into lexical items divided by NTs
                    score = 0
                    counter = 0
                    composedRep = np.zeros(vecSize)
                    for subphrase in subPhrases:
                        if subphrase.strip() in phraseDict: #if the subphrase has a segmentation score, i.e., it is also a pre-terminal
                            counter += 1
                            score += phraseDict[subphrase.strip()] if not printComposed else phraseDict[subphrase.strip()][0]
                            if printComposed:
                                composedRep += phraseDict[subphrase.strip()][1]
                    if counter > 0: #valid segmentation score
                        features += " SegScore=%.3f SegOn=1"%(score / counter) #average over scores for each phrase in rule
                        #features += " SegOn=1" #for baseline purposes
                        if printComposed: #for printing the composed representation as a feature
                            composedRep = np.divide(composedRep, counter)
                            for featName, featVal in enumerate(composedRep):
                                features += " Dimension%d=%.3f"%(featName, featVal)
                    else:
                        features += " NoSeg=1"
                elif src_rule in phraseDict:
                    # Purely lexical rule with a direct score.
                    if printComposed:
                        features += " SegScore=%.3f SegOn=1"%phraseDict[src_rule][0]
                        composedRep = phraseDict[src_rule][1]
                        for featName, featVal in enumerate(composedRep):
                            features += " Dimension%d=%.3f"%(featName, featVal)
                    else:
                        features += " SegScore=%.3f SegOn=1"%phraseDict[src_rule]
                else:
                    features += " NoSeg=1"
                arrayToPrint = elements[:3] + [features] + elements[4:]
                lineToPrint = ' ||| '.join(arrayToPrint)
                out_fh.write("%s\n"%lineToPrint)
            grammar_fh.close()
            out_fh.close()
'''
similar function to above, except here we directly write out the non-compositionality scores
of certain noun-noun and adjective-noun pairs given a context (all the preprocessing has been
done beforehand). This is for evaluation and comparison with human scorers.
'''
def writeNonCompScores(phrase_tuples, averaging, perplexity, binning, numBins):
    # Writes "phrase ||| average score" lines for evaluation against human
    # non-compositionality judgments. Each phrase may occur in several
    # contexts; its scores are (optionally binned and) averaged.
    # NOTE(review): nesting reconstructed from mangled indentation.
    phraseDict = {}
    for phrase, phrase_pos, context, phraseRep, score, sentNum in phrase_tuples: #restructure into more suitable output
        if len(context) > 0:
            if perplexity:
                score = math.exp(-score / len(context))
            elif averaging:
                score /= len(context)
        scores = phraseDict[phrase] if phrase in phraseDict else []
        scores.append(score)
        phraseDict[phrase] = scores
    if binning != "": #for binning
        phrases = [] #for ordering purposes
        scores = []
        binned_phraseDict = {}
        for phrase in phraseDict: #reformat data
            # Flatten to parallel (phrase, score) lists so one global
            # binning can be applied across all occurrences.
            scores_per_phrase = phraseDict[phrase]
            for score in scores_per_phrase:
                phrases.append(phrase)
                scores.append(score)
        bins = []
        if binning == "width":
            histo, bins = np.histogram(scores, numBins)
        else: #can only be size since binning != ""
            # Equal-size bins: quantile boundaries so each bin holds the
            # same number of elements.
            numElementsPerBin = float(len(scores)) / numBins
            probs = np.array(range(numBins+1))*numElementsPerBin
            probs /= float(len(scores))
            bins = sp.stats.mstats.mquantiles(scores, probs)
        binned_scores = np.digitize(scores, bins)
        for idx, phrase in enumerate(phrases):
            score = binned_scores[idx]
            if score > numBins: #should only happen once, to max(scores)
                score = numBins
            scores_per_phrase = binned_phraseDict[phrase] if phrase in binned_phraseDict else []
            scores_per_phrase.append(score)
            binned_phraseDict[phrase] = scores_per_phrase
        phraseDict = binned_phraseDict
    for phrase in phraseDict: #print out all the phrases in phraseDict
        scores = phraseDict[phrase]
        averageScore = sum(scores) / len(scores)
        print "%s ||| %.3f"%(phrase, averageScore)
def main():
    # Command-line driver. Positional arguments:
    #   args[0]: word vector file        args[1]: context vector file
    #   args[2]: combiner parameters     args[3]: unigram counts file
    #   args[4]: per-sentence grammar location
    # The POS-tagged corpus is read from stdin (see scoreSegmentations).
    (opts, args) = getopt.getopt(sys.argv[1:], 'aAb:B:cC:d:fhl:Mn:NpPrs:uvVw:')
    concat = False
    rightBranch = False
    numContext = 2
    numStop = 20
    averaging = False
    binning = ""
    numBins = 10 #default number of bins
    uniCorrection = False
    numJobs = -1
    perplexity = False #-P
    cosine = -1
    headed = False
    featNTs = False
    distanceCorr = ""
    printVecOnly = False #-v
    printFullOnly = False #-f
    printPOSOnly = False #-p
    printComposedRep = False
    addModel = False
    multModel = False
    writePSG = ""
    for opt in opts:
        if opt[0] == '-a':
            averaging = True
        elif opt[0] == '-b': #enable binning; argument is the type of bin
            binning = opt[1]
        elif opt[0] == '-B': #can change bin size with this option
            numBins = int(opt[1])
        elif opt[0] == '-c':
            concat = True
        elif opt[0] == '-r':
            rightBranch = True
        elif opt[0] == '-l':
            numContext = int(opt[1])
        elif opt[0] == '-s': #if numStop = 0, then no stop words used
            numStop = int(opt[1])
        elif opt[0] == '-u': #unigram correction
            uniCorrection = True
        elif opt[0] == '-n': #normalize the probabilities
            numJobs = int(opt[1]) #arg: number of processes to use
        elif opt[0] == '-P': #perplexity calculation
            perplexity = True
        elif opt[0] == '-C':
            cosine = float(opt[1])
        elif opt[0] == '-h':
            headed = True
        elif opt[0] == '-d':
            distanceCorr = opt[1]
        elif opt[0] == '-p':
            printPOSOnly = True
        elif opt[0] == '-v':
            printVecOnly = True
        elif opt[0] == '-f':
            printFullOnly = True
        elif opt[0] == '-w':
            writePSG = opt[1]
        elif opt[0] == '-V':
            printComposedRep = True
        elif opt[0] == '-N': #featurize phrases with NTs
            featNTs = True
        elif opt[0] == '-M': #multiplicative model
            multModel = True
        elif opt[0] == '-A': #simple additive model
            addModel = True
    # Build the compositional model and load word/context vectors.
    model = CompoModel(args[2], concat, True, headed, rightBranch, multModel, addModel)
    model.readVecFile(args[0], "word")
    model.readVecFile(args[1], "context")
    uniModel = Unigrams(args[3], numStop)
    grammar_loc_in = args[4]
    distanceDict = {}
    if distanceCorr != "":
        for line in open(distanceCorr, 'rb'): #read in distances
            elements = line.strip().split('\t')
            distanceDict[elements[0]] = float(elements[1])
    # Validate mutually incompatible option combinations up front.
    if uniCorrection and numJobs < 0:
        sys.stderr.write("Error! Unigram correction only valid if normalization used\n")
        sys.exit()
    if averaging and perplexity:
        sys.stderr.write("Error! Cannot do both averaging and perplexity score at the same time\n")
        sys.exit()
    if distanceCorr != "" and writePSG != "":
        sys.stderr.write("Error! Cannot do both distance correlation computation and writing per-sentence grammar; disable one\n")
        sys.exit()
    if binning != "" and not (binning == "width" or binning == "size"):
        sys.stderr.write("Error! Argument to '-b' option needs to be either 'width' or 'size'\n")
        sys.exit()
    if printComposedRep and writePSG == "":
        sys.stderr.write("Note: cannot print composed representation of phrase when not writing out per-sentence grammar; ignored\n")
    phrase_tuples = scoreSegmentations(model, uniModel, numContext, grammar_loc_in, printVecOnly, cosine, distanceCorr != "", writePSG != "")
    sys.stderr.write("Scored phrases in context\n")
    if not printVecOnly:
        # Optional parallel normalization of the raw scores (-n).
        revised_tuples = computeNormalizerParallel(phrase_tuples, numJobs, model, uniModel, uniCorrection) if numJobs > 0 else phrase_tuples
        sys.stderr.write("Normalized phrase scores (if requested)\n")
        if distanceCorr != "":
            # Correlation-vs-word2vec-distance evaluation mode (-d).
            pos_score_dist, scores, distances = printScoresAndDistances(revised_tuples, model, numContext, averaging, perplexity, distanceDict, printFullOnly, printPOSOnly)
            if printPOSOnly:
                printPOSInfo(pos_score_dist)
            coeff, pval = sp.stats.stats.pearsonr(scores, distances)
            print "Out of %d samples, correlation between compositional model score and distance is %.3f (%.3f)"%(len(scores), coeff, pval)
            print "Average distance between directly learned representations and composed representations: %.3f"%(sum(distances) / len(distances))
        if writePSG != "":
            # MT integration mode (-w): annotate per-sentence grammars.
            writePerSentenceGrammar(grammar_loc_in, writePSG, revised_tuples, printComposedRep, averaging, perplexity, binning, numBins, featNTs)
            sys.stderr.write("Wrote per-sentence grammars\n")
        else: #then just write out phrase ||| average score over context sentences
            writeNonCompScores(revised_tuples, averaging, perplexity, binning, numBins)
if __name__ == "__main__":
main()
| |
# Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import logging
import functools
from proboscis import before_class
from proboscis import test
from proboscis import SkipTest
from proboscis import TestProgram
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_true
from proboscis.asserts import Check
from troveclient.compat import TroveHTTPClient
from troveclient.compat import client as trove_client
from trove.tests.config import CONFIG
from troveclient.compat import Dbaas
from trove.tests.examples.client import SnippetWriter
from trove.tests.examples.client import JsonClient
# Silence troveclient's logger so the generated example output stays clean.
trove_client._logger.setLevel(logging.CRITICAL)
# Deterministic stand-ins used by the monkey-patched time/uuid helpers so
# the generated API example snippets are reproducible across runs
# ('m'/'s' are the minute/second of the fake timestamp).
FAKE_INFO = {'m': 30, 's': 0, 'uuid': 'abcdef00-aaaa-aaaa-aaaa-bbbbbbbbbbbb'}
# Fixed IDs substituted into the example snippets.
EXAMPLE_BACKUP_ID = "a9832168-7541-4536-b8d9-a8a9b79cf1b4"
EXAMPLE_BACKUP_INCREMENTAL_ID = "2e351a71-dd28-4bcb-a7d6-d36a5b487173"
EXAMPLE_CONFIG_ID = "43a6ea86-e959-4735-9e46-a6a5d4a2d80f"
EXAMPLE_INSTANCE_ID = "44b277eb-39be-4921-be31-3d61b43651d7"
EXAMPLE_INSTANCE_ID_2 = "d5a9db64-7ef7-41c5-8e1e-4013166874bc"
EXAMPLE_CONFIG_SERVER_ID = "271898715"
def get_now():
    """Deterministic utcnow() replacement: fixed date, fake minute/second."""
    from datetime import datetime
    fake_minute = FAKE_INFO['m']
    fake_second = FAKE_INFO['s']
    return datetime(2014, 10, 30, hour=12, minute=fake_minute,
                    second=fake_second)
def get_uuid():
    """Deterministic uuid replacement (returns the fake uuid string)."""
    fake_uuid = FAKE_INFO['uuid']
    return fake_uuid
def set_fake_stuff(uuid=None, minute=None, unique_id=None):
    """
    Override the deterministic fakes used by the monkey-patched helpers.

    uuid: new fake uuid string returned by get_uuid().
    minute: new minute for get_now()'s fixed timestamp.
    unique_id: fixed value for SingleInstanceConfigTemplate's unique-id hook.
    """
    # 'is not None' so legitimate falsy values (e.g. minute=0) are applied.
    if uuid is not None:
        FAKE_INFO['uuid'] = uuid
    if minute is not None:
        # BUG FIX: this previously wrote FAKE_INFO['minute'], a key nothing
        # reads -- get_now() reads FAKE_INFO['m'], so the override was a
        # silent no-op.
        FAKE_INFO['m'] = minute
    if unique_id is not None:
        from trove.common.template import SingleInstanceConfigTemplate

        def fake_calc_id(self):
            return unique_id
        SingleInstanceConfigTemplate._calculate_unique_id = fake_calc_id
def monkey_patch_uuid_and_date():
    # Replace the real time/uuid sources with the deterministic fakes above
    # so generated example snippets are reproducible across runs.
    import uuid
    # NOTE(review): get_uuid returns a plain string, not a uuid.UUID --
    # callers of uuid.uuid4() here apparently only need the string form.
    uuid.uuid4 = get_uuid
    from trove.common import utils
    utils.utcnow = get_now
    utils.generate_uuid = get_uuid
@test
def load_config_file():
    """
    Load the 'examples' section of the test config and resolve the normal
    and admin users into module globals used by the example tests.
    """
    global conf
    if CONFIG.get("examples", None) is None:
        # FIX: 'fail' was an undefined name here, so the intended failure
        # report raised NameError instead.
        from proboscis.asserts import fail
        fail("Missing 'examples' config in test config.")
    conf = CONFIG.examples
    global normal_user
    normal_user = CONFIG.users.find_user_by_name(conf['normal_user_name'])
    global admin_user
    admin_user = CONFIG.users.find_user_by_name(conf['admin_user_name'])
def create_client_args(user):
    """
    Build the (args, kwargs) pair used to construct a Dbaas client for the
    given test user, honoring the various test-config overrides.
    Returns ((auth_user, auth_key), kwargs).
    """
    kwargs = {
        'service_type': 'trove',
        'insecure': CONFIG.values['trove_client_insecure'],
    }

    def set_optional(kwargs_name, test_conf_name):
        # Copy an optional test-config value into kwargs when present.
        value = CONFIG.values.get(test_conf_name, None)
        if value is not None:
            kwargs[kwargs_name] = value

    # Admin users may target a different API endpoint than normal users.
    service_url = CONFIG.get('override_trove_api_url', None)
    if user.requirements.is_admin:
        service_url = CONFIG.get('override_admin_trove_api_url',
                                 service_url)
    if service_url:
        kwargs['service_url'] = service_url

    # (Removed two dead 'auth_strategy = None' assignments that were
    # unconditionally overwritten below.)
    if user.requirements.is_admin:
        auth_strategy = CONFIG.get('admin_auth_strategy',
                                   CONFIG.auth_strategy)
    else:
        auth_strategy = CONFIG.auth_strategy
    set_optional('region_name', 'trove_client_region_name')
    if CONFIG.values.get('override_trove_api_url_append_tenant',
                         False):
        kwargs['service_url'] += "/" + user.tenant

    if auth_strategy == 'fake':
        from troveclient.compat import auth

        class FakeAuth(auth.Authenticator):
            # Fake auth: the "token" is simply the tenant name, and the
            # public URL is built from the configured dbaas_url.
            def authenticate(self):
                class FakeCatalog(object):
                    def __init__(self, auth):
                        self.auth = auth

                    def get_public_url(self):
                        return "%s/%s" % (CONFIG.dbaas_url,
                                          self.auth.tenant)

                    def get_token(self):
                        return self.auth.tenant

                return FakeCatalog(self)

        auth_strategy = FakeAuth
    if auth_strategy:
        kwargs['auth_strategy'] = auth_strategy

    if not user.requirements.is_admin:
        auth_url = CONFIG.trove_auth_url
    else:
        auth_url = CONFIG.values.get('trove_admin_auth_url',
                                     CONFIG.trove_auth_url)
    if CONFIG.values.get('trove_client_cls'):
        # FIX: 'import_class' was an undefined name (NameError whenever a
        # custom client class was configured).
        from trove.common.utils import import_class
        cls_name = CONFIG.trove_client_cls
        kwargs['client_cls'] = import_class(cls_name)
    kwargs['tenant'] = user.tenant
    kwargs['auth_url'] = auth_url
    return (user.auth_user, user.auth_key), kwargs
def create_client(cls, user):
    """Build a Dbaas client for user, using cls as the HTTP client class."""
    args, kwargs = create_client_args(user)
    kwargs['client_cls'] = cls
    return Dbaas(*args, **kwargs)
def make_client(user):
    """Build, label, and authenticate a JSON Dbaas client for *user*."""
    auth_args, client_kwargs = create_client_args(user)
    client_kwargs['client_cls'] = JsonClient
    client = Dbaas(*auth_args, **client_kwargs)
    # Name the underlying HTTP client so the auth exchange is captured
    # under "auth" rather than a test snippet name.
    client.client.name = "auth"
    client.authenticate()
    return client
def write_snippet(get_replace_list, client, name, url, method, status, reason,
                  func, *func_args):
    """
    'name' is the name of the file, while 'url,' 'method,' 'status,'
    and 'reason' are expected values that are asserted against.
    If func_args is present, it is a list of lists, each one of which
    is passed as the *args to the two invocations of "func".
    """
    # NOTE(review): despite the docstring, 'func' is invoked exactly once
    # below — presumably a second (XML-client) invocation was removed at some
    # point; confirm before relying on multi-format results.
    func_args = func_args or []
    snippet_writer = SnippetWriter(conf, get_replace_list)
    results = []
    client.client.snippet_writer = snippet_writer
    client.client.name = name
    args = func_args
    result = func(client, *args)
    # Now write the snippet (if this happens earlier we can't replace
    # data such as the instance ID).
    client.client.write_snippet()
    with Check() as check:
        # old_info holds the request/response captured by the client wrapper.
        check.equal(client.client.old_info['url'], url)
        check.equal(client.client.old_info['method'], method)
        check.equal(client.client.old_info['response_headers'].status,
                    status)
        check.equal(client.client.old_info['response_headers'].reason,
                    reason)
        results.append(result)
    # To prevent this from writing a snippet somewhere else...
    client.client.name = "junk"
    return results
JSON_INDEX = 0
class Example(object):
    """Base class for snippet test groups; subclasses assign self.client."""
    @classmethod
    def get_replace_list(cls):
        # Replacement rules handed to SnippetWriter; none by default.
        return []
    def snippet(self, *args, **kwargs):
        """Delegate to write_snippet() with this group's replace list/client."""
        return write_snippet(self.get_replace_list, self.client,
                             *args, **kwargs)
@test(depends_on=[load_config_file], enabled=False)
class Versions(Example):
    """API version listing snippets (currently disabled)."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def get_versions(self):
        self.snippet(
            "versions",
            "", "GET", 200, "OK",
            lambda client: client.versions.index(conf['version_url']))
    @test
    def get_version(self):
        def version_call(client):
            return client.versions.index(conf['version_url'] + "/v1.0/")
        self.snippet("versions", "/v1.0", "GET", 200, "OK", get_version)
@test(depends_on=[load_config_file])
class Flavors(Example):
    """Flavor list and get-by-id snippets."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def get_flavors(self):
        self.snippet(
            "flavors",
            "/flavors", "GET", 200, "OK",
            lambda client: client.flavors.list())
    @test
    def get_flavor_by_id(self):
        self.snippet(
            "flavors_by_id",
            "/flavors/1", "GET", 200, "OK",
            lambda client: client.flavors.get(1))
@test(depends_on=[load_config_file])
def clean_slate():
    """Verify no instances exist before the example run begins."""
    admin_client = create_client(TroveHTTPClient, admin_user)
    admin_client.client.name = "list"
    existing = admin_client.instances.list()
    assert_equal(0, len(existing), "Instance count must be zero.")
@test(depends_on=[clean_slate])
class CreateInstance(Example):
    """Creates the primary instance the 'uses_instances' groups depend on."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def post_create_instance(self):
        # Pin the fake UUID so the generated snippet is deterministic.
        set_fake_stuff(uuid=EXAMPLE_INSTANCE_ID)
        def create_instance(client, name):
            instance = client.instances.create(
                name, 1,
                volume={'size': 2},
                databases=[
                    {
                        "name": "sampledb",
                        "character_set": "utf8",
                        "collate": "utf8_general_ci"
                    },
                    {
                        "name": "nextround"
                    }
                ],
                users=[
                    {
                        "databases": [{"name": "sampledb"}],
                        "name": "demouser",
                        "password": "demopassword"
                    }
                ])
            assert_equal(instance.status, "BUILD")
            return instance
        self.instances = self.snippet(
            "create_instance",
            "/instances", "POST", 200, "OK",
            create_instance,
            "json_rack_instance")
    def an_instance_is_not_active(self):
        # Helper (not a @test): True while any created instance is still
        # building; the only non-ACTIVE state tolerated is BUILD.
        for instance in self.instances:
            instance = self.client.instances.get(instance.id)
            if instance.status != "ACTIVE":
                assert_equal(instance.status, "BUILD")
                return True
        return False
    @test(depends_on=[post_create_instance])
    def wait_for_instances(self):
        # Poll until every instance is ACTIVE, then publish the module-wide
        # json_instance used by all dependent groups.
        while self.an_instance_is_not_active():
            time.sleep(1)
        global json_instance
        json_instance = self.instances[0]
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Databases(Example):
    """Database create/list/paginate/delete snippets against json_instance."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def post_create_databases(self):
        self.snippet(
            "create_databases",
            "/instances/%s/databases" % json_instance.id,
            "POST", 202, "Accepted",
            lambda client: client.databases.create(
                json_instance.id,
                databases=
                [
                    {
                        "name": "testingdb",
                        "character_set": "utf8",
                        "collate": "utf8_general_ci"
                    }, {
                        "name": "anotherdb"
                    }, {
                        "name": "oneMoreDB"
                    }
                ]))
    @test(depends_on=[post_create_databases])
    def get_list_databases(self):
        self.snippet(
            "list_databases",
            "/instances/%s/databases" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.databases.list(json_instance.id))
    @test(depends_on=[post_create_databases])
    def get_list_databases_limit_two(self):
        # limit=1 so the response exercises the pagination marker ('next').
        results = self.snippet(
            "list_databases_pagination",
            "/instances/%s/databases?limit=1" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.databases.list(json_instance.id, limit=1))
        assert_equal(1, len(results[JSON_INDEX]))
        assert_equal("anotherdb", results[JSON_INDEX].next)
    @test(depends_on=[post_create_databases],
          runs_after=[get_list_databases, get_list_databases_limit_two])
    def delete_databases(self):
        self.snippet(
            "delete_databases",
            "/instances/%s/databases/testingdb" % json_instance.id,
            "DELETE", 202, "Accepted",
            lambda client:
            client.databases.delete(json_instance.id, 'testingdb'))
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Users(Example):
    """User create/list/paginate/delete/update snippets."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def post_create_users(self):
        self.snippet(
            "create_users",
            "/instances/%s/users" % json_instance.id,
            "POST", 202, "Accepted",
            lambda client: client.users.create(
                json_instance.id,
                [{
                    "name": "dbuser1",
                    "password": "password",
                    "databases": [
                        {
                            "name": "databaseA"
                        }
                    ]
                }, {
                    "name": "dbuser2",
                    "password": "password",
                    "databases": [
                        {
                            "name": "databaseB"
                        },
                        {
                            "name": "databaseC"
                        }
                    ]
                }, {
                    "name": "dbuser3",
                    "password": "password",
                    "databases": [
                        {
                            "name": "databaseD"
                        }
                    ]
                }]))
    @test(depends_on=[post_create_users])
    def get_list_users(self):
        self.snippet(
            "list_users",
            "/instances/%s/users" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.users.list(json_instance.id))
    @test(depends_on=[post_create_users])
    def get_list_users_limit_two(self):
        self.snippet(
            "list_users_pagination",
            "/instances/%s/users?limit=2" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.users.list(json_instance.id, limit=2))
    @test(depends_on=[post_create_users],
          runs_after=[get_list_users, get_list_users_limit_two])
    def delete_users(self):
        # Deletes "demouser", which was created with the instance itself.
        user_name = "demouser"
        self.snippet(
            "delete_users",
            "/instances/%s/users/%s" % (json_instance.id, user_name),
            "DELETE", 202, "Accepted",
            lambda client: client.users.delete(json_instance.id,
                                               username=user_name))
    @test(depends_on=[post_create_users])
    def modify_user_attributes(self):
        old_user_name = "dbuser1"
        self.snippet(
            "change_user_attributes",
            "/instances/%s/users/%s" % (json_instance.id, old_user_name),
            "PUT", 202, "Accepted",
            lambda client: client.users.update_attributes(
                json_instance.id,
                username=old_user_name,
                newuserattr={
                    "name": "new_username",
                    "password": "new_password"
                }
            )
        )
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Root(Example):
    """Root-enable and root-status snippets."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def post_enable_root_access(self):
        self.snippet(
            "enable_root_user",
            "/instances/%s/root" % json_instance.id,
            "POST", 200, "OK",
            lambda client: client.root.create(json_instance.id))
    @test(depends_on=[post_enable_root_access])
    def get_check_root_access(self):
        results = self.snippet(
            "check_root_user",
            "/instances/%s/root" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.root.is_root_enabled(json_instance.id))
        assert_equal(results[JSON_INDEX].rootEnabled, True)
class ActiveMixin(Example):
    """Adds a method to wait for instance status to become ACTIVE."""
    def _wait_for_active(self, *acceptable_states):
        # Re-fetches the shared json_instance until it is ACTIVE; any
        # intermediate status must be one of *acceptable_states
        # (e.g. "REBOOT", "RESIZE"), otherwise the test fails.
        global json_instance
        json_instance = self.client.instances.get(json_instance.id)
        print('instance.status=%s' % json_instance.status)
        while json_instance.status != "ACTIVE":
            assert_true(
                json_instance.status in acceptable_states,
                "Instance status == %s; expected it to be one of: %s"
                % (json_instance.status, acceptable_states))
            time.sleep(0.1)
            json_instance = self.client.instances.get(json_instance.id)
    def _wait_for_restore_active(self, *acceptable_states):
        # Same polling loop for the restored instance.  Note the refreshed
        # 'instance' is loop-local: self.json_restore itself is NOT updated
        # here — callers re-fetch it afterwards.
        for instance in (self.json_restore, ):
            instance = self.client.instances.get(instance.id)
            print('instance.status=%s' % instance.status)
            while instance.status != "ACTIVE":
                assert_true(
                    instance.status in acceptable_states,
                    "Instance status == %s; expected it to be one of: %s"
                    % (instance.status, acceptable_states))
                time.sleep(0.1)
                instance = self.client.instances.get(instance.id)
# Cross-class shared state, populated as the dependency-ordered groups run:
# CONFIGURATION is set by Configurations.create_configuration; the datastore
# ids are set by the Datastores group.
STATE = {
    "CONFIGURATION": None,
    "DATASTORE_ID": None,
    "DATASTORE_VERSION_ID": None,
}
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Datastores(Example):
    """Datastore and datastore-version snippets; records ids into STATE."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def get_datastores_list(self):
        self.datastores = self.snippet(
            "datastores_list",
            "/datastores",
            "GET", 200, "OK",
            lambda client: client.datastores.list())
        for result in self.datastores:
            assert_equal(1, len(result))
    @test(depends_on=[get_datastores_list])
    def get_datastore_by_id(self):
        ds, = self.datastores
        # Only the 'mysql' datastore is used by these examples.
        mysql_ds = [x for x in ds if x.name == 'mysql']
        if not mysql_ds:
            fail('no mysql datastore found in list')
        ds_id = STATE["DATASTORE_ID"] = mysql_ds[JSON_INDEX].id
        self.datastore = self.snippet(
            "datastore_by_id",
            "/datastores/%s" % ds_id,
            "GET", 200, "OK",
            lambda client: client.datastores.get(ds_id))
    @test(depends_on=[get_datastore_by_id])
    def get_datastore_versions_list(self):
        ds_id = STATE["DATASTORE_ID"]
        self.datastore_versions = self.snippet(
            "datastore_versions_list",
            "/datastores/%s/versions" % ds_id,
            "GET", 200, "OK",
            lambda client: client.datastore_versions.list(ds_id))
    @test(depends_on=[get_datastore_versions_list])
    def get_datastore_version_by_id(self):
        ds_id = STATE["DATASTORE_ID"]
        # Use the first listed version for all later version-scoped calls.
        ds_v_id = STATE["DATASTORE_VERSION_ID"] = (
            self.datastore_versions[JSON_INDEX][0].id
        )
        self.datastore_version = self.snippet(
            "datastore_version_by_id",
            "/datastores/%s/versions/%s" % (ds_id, ds_v_id),
            "GET", 200, "OK",
            lambda client: client.datastore_versions.get(ds_id, ds_v_id))
@test(depends_on=[Datastores], groups=['uses_instances'])
class Configurations(ActiveMixin):
    """Configuration-group lifecycle snippets: parameters, create, edit,
    update, attach/detach to json_instance, and the post-change restart."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def get_configuration_parameters_for_datastore_version(self):
        ds_id = STATE["DATASTORE_ID"]
        ds_v_id = STATE["DATASTORE_VERSION_ID"]
        self.snippet(
            "configuration_parameters_for_datastore_version",
            "/datastores/%s/versions/%s/parameters" % (ds_id, ds_v_id),
            "GET", 200, "OK",
            lambda client: client.configuration_parameters.parameters(
                ds_id, ds_v_id
            )
        )
    @test
    def get_configuration_parameters_without_datastore_version(self):
        ds_v_id = STATE["DATASTORE_VERSION_ID"]
        self.params = self.snippet(
            "configuration_parameters_without_datastore_version",
            "/datastores/versions/%s/parameters" % (ds_v_id),
            "GET", 200, "OK",
            lambda client: (
                client.configuration_parameters.parameters_by_version(ds_v_id)
            )
        )
        assert_true(self.params)
    @test(depends_on=[get_configuration_parameters_without_datastore_version])
    def get_configuration_parameter_for_datastore_version(self):
        ds_id = STATE["DATASTORE_ID"]
        ds_v_id = STATE["DATASTORE_VERSION_ID"]
        param = self.params[JSON_INDEX][0].name
        self.snippet(
            "configuration_parameter_for_datastore_version",
            "/datastores/%s/versions/%s/parameters/%s"
            % (ds_id, ds_v_id, param),
            "GET", 200, "OK",
            lambda client: client.configuration_parameters.get_parameter(
                ds_id, ds_v_id, param))
    @test(depends_on=[get_configuration_parameters_without_datastore_version])
    def get_configuration_parameter_without_datastore_version(self):
        ds_v_id = STATE["DATASTORE_VERSION_ID"]
        param = self.params[JSON_INDEX][0].name
        def get_param(client):
            return client.configuration_parameters.get_parameter_by_version(
                ds_v_id,
                param
            )
        # NOTE: this overwrites self.params with a single parameter result.
        self.params = self.snippet(
            "configuration_parameter_without_datastore_version",
            "/datastores/versions/%s/parameters/%s" % (ds_v_id, param),
            "GET", 200, "OK",
            get_param
        )
    @test(depends_on=[get_configuration_parameter_without_datastore_version])
    def create_configuration(self):
        # Pin the fake UUID so the generated snippet is deterministic.
        set_fake_stuff(uuid=EXAMPLE_CONFIG_ID)
        ds_id = STATE["DATASTORE_ID"]
        ds_v_id = STATE["DATASTORE_VERSION_ID"]
        values = {
            "connect_timeout": 120,
            "collation_server": "latin1_swedish_ci"
        }
        def create(client):
            config = client.configurations.create(
                'example-configuration-name', json.dumps(values),
                'example description', ds_id, ds_v_id)
            return config
        self.configurations = self.snippet(
            "configuration_create",
            "/configurations",
            "POST", 200, "OK",
            create)
        # Shared with ZzzDeleteInstance.delete_configuration at teardown.
        STATE["CONFIGURATION"] = self.configurations[JSON_INDEX]
    @test(depends_on=[create_configuration])
    def get_configuration(self):
        config = STATE["CONFIGURATION"]
        self.config = self.snippet(
            "configuration_details",
            "/configurations/%s" % config.id,
            "GET", 200, "OK",
            lambda client: client.configurations.get(config.id))
    @test(depends_on=[create_configuration])
    def list_configurations(self):
        self.configs = self.snippet(
            "configuration_list",
            "/configurations",
            "GET", 200, "OK",
            lambda client: client.configurations.list())
    @test(depends_on=[list_configurations, get_configuration])
    def edit_configuration(self):
        # PATCH: merge new values into the existing configuration.
        config = STATE["CONFIGURATION"]
        values = {
            'connect_timeout': 300
        }
        self.snippet(
            "configuration_edit_parameters",
            "/configurations/%s" % config.id,
            "PATCH", 200, "OK",
            lambda client: client.configurations.edit(
                config.id, json.dumps(values)))
    @test(depends_on=[edit_configuration])
    def update_configuration(self):
        # PUT: replace values plus name/description wholesale.
        config = STATE["CONFIGURATION"]
        values = {
            'connect_timeout': 150,
            'collation_server': 'utf8_unicode_ci'
        }
        self.snippet(
            "configuration_update_parameters",
            "/configurations/%s" % config.id,
            "PUT", 202, "Accepted",
            lambda client: client.configurations.update(
                config.id, json.dumps(values),
                'example-updated-name', 'example updated description'))
    @test(depends_on=[update_configuration])
    def attach_configuration_to_instance(self):
        config = STATE["CONFIGURATION"]
        self.snippet(
            "configuration_attach_to_instance",
            "/instances/%s" % json_instance.id,
            "PUT", 202, "Accepted",
            lambda client: client.instances.modify(
                json_instance.id,
                config.id
            )
        )
    @test(depends_on=[attach_configuration_to_instance])
    def list_configurations_instances(self):
        config = STATE["CONFIGURATION"]
        self.config_instances = self.snippet(
            "configuration_list_instances",
            "/configurations/%s/instances" % config.id,
            "GET", 200, "OK",
            lambda client: client.configurations.instances(config.id))
    @test(depends_on=[list_configurations_instances])
    def detach_configuration_from_instance(self):
        # Detach by modifying the instance with an empty configuration id.
        self.snippet(
            "configuration_detach_from_instance",
            "/instances/%s" % json_instance.id,
            "PUT", 202, "Accepted",
            lambda client: client.instances.modify(
                json_instance.id, ""))
    @test(depends_on=[detach_configuration_from_instance])
    def instance_restart_after_configration_change(self):
        # (sic: "configration" — renaming would change the reported test id.)
        self.client.instances.restart(json_instance.id)
        self._wait_for_active("REBOOT")
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class InstanceList(Example):
    """Instance listing/detail/pagination snippets."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def get_list_instance_index(self):
        results = self.snippet(
            "instances_index",
            "/instances", "GET", 200, "OK",
            lambda client: client.instances.list())
        for result in results:
            assert_equal(1, len(result))
    @test
    def get_instance_details(self):
        results = self.snippet(
            "instance_status_detail",
            "/instances/%s" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.instances.get(json_instance.id))
        assert_equal(results[JSON_INDEX].id, json_instance.id)
    @test
    def get_default_instance_configuration(self):
        set_fake_stuff(unique_id=EXAMPLE_CONFIG_SERVER_ID)
        self.snippet(
            "get_default_instance_configuration",
            "/instances/%s/configuration" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.instances.configuration(json_instance.id))
    @test
    def get_list_instance_index_limit_two(self):
        # Create a temporary third instance so limit=2 pagination is visible,
        # then delete it at the end of the test.
        third_instance = self.client.instances.create(
            "The Third Instance", 1, volume={'size': 2})
        third_instance = self.client.instances.get(third_instance.id)
        while third_instance.status != "ACTIVE":
            time.sleep(0.1)
            third_instance = self.client.instances.get(third_instance.id)
        results = self.snippet(
            "instances_index_pagination",
            "/instances?limit=2", "GET", 200, "OK",
            lambda client: client.instances.list(limit=2))
        for result in results:
            assert_equal(2, len(result))
        self.client.instances.delete(third_instance.id)
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Backups(ActiveMixin):
    """Backup create/get/list, restore, and delete snippets."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def create_backup(self):
        set_fake_stuff(uuid=EXAMPLE_BACKUP_ID)
        results = self.snippet(
            "backup_create", "/backups", "POST", 202, "Accepted",
            lambda client: client.backups.create(
                name='snapshot',
                instance=json_instance.id,
                description="My Backup"
            )
        )
        self._wait_for_active("BACKUP")
        assert_equal(len(results), 1)
        self.json_backup = results[JSON_INDEX]
    @test
    def create_incremental_backup(self):
        set_fake_stuff(uuid=EXAMPLE_BACKUP_INCREMENTAL_ID)
        results = self.snippet(
            "backup_create_incremental", "/backups", "POST", 202, "Accepted",
            lambda client: client.backups.create(
                name='Incremental Snapshot',
                instance=json_instance.id,
                parent_id=EXAMPLE_BACKUP_ID,
                description="My Incremental Backup"
            )
        )
        self._wait_for_active("BACKUP")
        assert_equal(len(results), 1)
        # NOTE(review): this overwrites self.json_backup set by create_backup;
        # tests below that depend only on create_backup may therefore operate
        # on the incremental backup depending on run order — confirm intended.
        self.json_backup = results[JSON_INDEX]
    @test(depends_on=[create_backup])
    def get_backup(self):
        results = self.snippet(
            "backup_get",
            "/backups/%s" % self.json_backup.id,
            "GET", 200, "OK",
            lambda client: client.backups.get(self.json_backup.id))
        assert_equal(len(results), 1)
    @test(depends_on=[create_backup])
    def get_backups_for_instance(self):
        results = self.snippet(
            "backups_by_instance",
            "/instances/%s/backups" % json_instance.id,
            "GET", 200, "OK",
            lambda client: client.instances.backups(json_instance.id))
        assert_equal(len(results), 1)
    @test(depends_on=[create_backup])
    def list_backups(self):
        results = self.snippet(
            "backup_list",
            "/backups", "GET", 200, "OK",
            lambda client: client.backups.list())
        assert_equal(len(results), 1)
    @test(depends_on=[create_backup])
    def restore(self):
        set_fake_stuff(uuid=EXAMPLE_INSTANCE_ID_2)
        def create_instance(client, name, backup):
            instance = client.instances.create(
                name, 1,
                volume={'size': 2},
                restorePoint={'backupRef': backup})
            assert_equal(instance.status, "BUILD")
            return instance
        results = self.snippet(
            "backup_restore",
            "/instances", "POST", 200, "OK",
            lambda client: create_instance(
                client, "backup_instance", self.json_backup.id))
        assert_equal(len(results), 1)
        self.json_restore = results[JSON_INDEX]
        self._wait_for_restore_active("BUILD")
        self.json_restore = self.client.instances.get(self.json_restore.id)
        assert_equal(self.json_restore.status, "ACTIVE")
    @test(depends_on=[restore])
    def delete_restores(self):
        self.snippet(
            "restore_delete",
            "/instances/%s" % self.json_restore.id,
            "DELETE", 202, "Accepted",
            lambda client: client.instances.delete(self.json_restore.id))
        self.json_restore = self.client.instances.get(self.json_restore.id)
        assert_equal(self.json_restore.status, "SHUTDOWN")
    @test(depends_on=[create_backup],
          runs_after=[get_backup, list_backups, restore,
                      get_backups_for_instance])
    def delete_backup(self):
        results = self.snippet(
            "backup_delete",
            "/backups/%s" % self.json_backup.id,
            "DELETE", 202, "Accepted",
            lambda client: client.backups.delete(self.json_backup.id))
        assert_equal(len(results), 1)
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class Actions(ActiveMixin):
    """Instance action snippets: restart, volume resize, flavor resize."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def instance_restart(self):
        self.snippet(
            "instance_restart",
            "/instances/%s/action" % json_instance.id,
            "POST", 202, "Accepted",
            lambda client: client.instances.restart(json_instance.id))
        self._wait_for_active("REBOOT")
    @test
    def instance_resize_volume(self):
        self.snippet(
            "instance_resize_volume",
            "/instances/%s/action" % json_instance.id,
            "POST", 202, "Accepted",
            lambda client: client.instances.resize_volume(json_instance.id, 4))
        self._wait_for_active("RESIZE")
        assert_equal(json_instance.volume['size'], 4)
    @test
    def instance_resize_flavor(self):
        self.snippet(
            "instance_resize_flavor",
            ("/instances/%s/action" % json_instance.id),
            "POST", 202, "Accepted",
            lambda client: client.instances.resize_instance(
                json_instance.id, 3))
        self._wait_for_active("RESIZE")
        # TODO(imsplitbit): remove coercion when troveclient fixes are in
        assert_equal(int(json_instance.flavor['id']), 3)
@test(depends_on=[CreateInstance], groups=['uses_instances', "MgmtHosts"])
class MgmtHosts(Example):
    """Admin (mgmt) host list/detail snippets against the two fake hosts."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_list_hosts(self):
        results = self.snippet(
            "mgmt_list_hosts",
            "/mgmt/hosts", "GET", 200, "OK",
            lambda client: client.mgmt.hosts.index())
        with Check() as check:
            for hosts in results:
                check.equal(2, len(hosts))
                check.true("fake_host_1" == hosts[0].name
                           or "fake_host_1" == hosts[1].name)
                check.true("fake_host_2" == hosts[0].name
                           or "fake_host_2" == hosts[1].name)
                check.true(1 == results[0][1].instanceCount
                           or 1 == results[0][0].instanceCount)
    @test
    def mgmt_get_host_detail(self):
        results = self.snippet(
            "mgmt_get_host_detail",
            "/mgmt/hosts/fake_host_1", "GET", 200, "OK",
            lambda client: client.mgmt.hosts.get("fake_host_1"))
        with Check() as check:
            # NOTE(review): the loop body inspects results[0] rather than
            # 'host', so every iteration re-checks the first result —
            # presumably 'host' was intended; confirm before changing.
            for host in results:
                check.equal(results[0].name, "fake_host_1")
                # XML entries won't come back as these types. :(
                # (Fixed: these three lines previously ended with stray
                # trailing commas, wrapping each discarded return value in a
                # one-element tuple.)
                check.true(isinstance(results[0].percentUsed, int))
                check.true(isinstance(results[0].totalRAM, int))
                check.true(isinstance(results[0].usedRAM, int))
        with Check() as check:
            for host in results:
                check.equal(1, len(host.instances))
                for instance in host.instances:
                    check.equal(instance['status'], 'ACTIVE')
                    check.true(isinstance(instance['name'], basestring))
                    check.true(isinstance(instance['id'], basestring))
                    check.true(isinstance(instance['server_id'], basestring))
                    check.true(isinstance(instance['tenant_id'], basestring))
    @test
    def mgmt_host_update_all(self):
        # Deliberately skipped; the snippet call below is unreachable until
        # the underlying feature works again.
        raise SkipTest("This isn't working... :(")
        self.snippet(
            "mgmt_host_update",
            "/mgmt/hosts/fake_host_1/instances/action",
            "POST", 202, "Accepted",
            lambda client: client.mgmt.hosts.update_all("fake_host_1"))
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtStorage(Example):
    """Admin storage-index snippet against the fake storage device."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_get_storage(self):
        results = self.snippet(
            "mgmt_get_storage",
            "/mgmt/storage", "GET", 200, "OK",
            lambda client: client.mgmt.storage.index())
        for index, devices in enumerate(results):
            with Check() as check:
                check.equal(1, len(devices))
                device = devices[0]
                check.equal(int(device.capacity['available']), 90)
                check.equal(int(device.capacity['total']), 100)
                check.equal(device.name, "fake_storage")
                check.equal(int(device.provision['available']), 40)
                check.equal(int(device.provision['percent']), 10)
                check.equal(int(device.provision['total']), 50)
                check.equal(device.type, "test_type")
                check.equal(int(device.used), 10)
                if index == JSON_INDEX:
                    # Only the JSON client preserves native int types.
                    check.true(isinstance(device.capacity['available'], int))
                    check.true(isinstance(device.capacity['total'], int))
                    check.true(isinstance(device.provision['available'], int))
                    check.true(isinstance(device.provision['percent'], int))
                    check.true(isinstance(device.provision['total'], int))
                    check.true(isinstance(device.used, int))
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtAccount(Example):
    """Admin account detail/list snippets."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_get_account_details(self):
        results = self.snippet(
            "mgmt_get_account_details",
            "/mgmt/accounts/%s" % conf['normal_user_tenant'],
            "GET", 200, "OK",
            lambda client: client.mgmt.accounts.show(
                conf['normal_user_tenant'], ))
        with Check() as check:
            for account_info in results:
                check.equal(conf['normal_user_tenant'], account_info.id)
    @test
    def mgmt_get_account_list(self):
        results = self.snippet(
            "mgmt_list_accounts",
            "/mgmt/accounts", "GET", 200, "OK",
            lambda client: client.mgmt.accounts.index())
        # NOTE(review): the per-tenant counts in 'matches' (2 and 0) are only
        # used as membership keys — the original code asserted the constant 2
        # in BOTH branches of an index check, so the values were never
        # compared against anything.  The identical branches are collapsed
        # here (behavior preserved); confirm what the intended per-format
        # assertion actually was.
        matches = {conf['normal_user_tenant']: 2,
                   conf['admin_user_tenant']: 0}
        for index, result in enumerate(results):
            for account in result.accounts:
                if account['id'] not in matches:
                    fail("Did not expect this account ID: %s" % account['id'])
                expected_count = matches[account['id']]
                assert_equal(2, expected_count)
def for_both(func):
    """Decorator: run *func* once per entry in ``self.results``.

    Converts a check of signature (self, result) into a zero-argument test
    method that applies the check to every stored client result.
    """
    @functools.wraps(func)
    def run_for_each(self):
        for single_result in self.results:
            func(self, single_result)
    return run_for_each
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstance(Example):
    """Field-by-field checks of the admin instance-details snippet."""
    @before_class
    def mgmt_get_instance_details(self):
        # Doubles as setup: fetches the results every @test below inspects.
        self.client = make_client(admin_user)
        self.results = self.snippet(
            "mgmt_get_instance_details",
            ("/mgmt/instances/%s" % json_instance.id),
            "GET", 200, "OK",
            lambda client: client.mgmt.instances.show(json_instance.id))
    @test
    @for_both
    def created(self, result):
        # basestring: this module targets Python 2.
        assert_true(isinstance(result.created, basestring))
    @test
    def deleted(self):
        assert_equal(self.results[JSON_INDEX].deleted, False)
    @test
    @for_both
    def flavor(self, result):
        # TODO(imsplitbit): remove the coercion when python-troveclient fixes
        # land in the public.
        assert_true(
            int(result.flavor['id']) == 1 or int(result.flavor['id']) == 3)
        assert_equal(len(result.flavor['links']), 2)
    @test
    @for_both
    def guest_status(self, result):
        assert_equal(result.guest_status['state_description'], 'running')
    @test(enabled=False)
    @for_both
    def host(self, result):
        assert_equal(result.host, 'fake_host_1')
    @test
    def id(self):
        assert_equal(self.results[JSON_INDEX].id, json_instance.id)
    @test
    @for_both
    def links(self, result):
        assert_true(isinstance(result.links, list))
        for link in result.links:
            assert_true(isinstance(link, dict))
            assert_true(isinstance(link['href'], basestring))
            assert_true(isinstance(link['rel'], basestring))
    @test
    def local_id(self):
        assert_true(isinstance(self.results[JSON_INDEX].server['local_id'],
                               int))
    @test
    @for_both
    def name(self, result):
        assert_true(isinstance(result.name, basestring))
    @test
    @for_both
    def server_id(self, result):
        assert_true(isinstance(result.server['id'], basestring))
    @test
    @for_both
    def status(self, result):
        assert_equal("ACTIVE", result.status)
    @test
    @for_both
    def task_description(self, result):
        assert_equal(result.task_description, "No tasks for the instance.")
    @test
    @for_both
    def tenant_id(self, result):
        assert_equal(result.tenant_id, conf['normal_user_tenant'])
    @test
    @for_both
    def updated(self, result):
        assert_true(isinstance(result.updated, basestring))
    @test
    @for_both
    def volume(self, result):
        assert_true(isinstance(result.volume, dict))
        assert_true('id' in result.volume)
        assert_true('size' in result.volume)
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstanceIndex(Example):
    """Admin instance-index snippet."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_instance_index(self, deleted=False):
        # NOTE(review): the 'deleted' parameter is unused — the lambda below
        # hard-codes deleted=False; confirm whether it was meant to be passed.
        self.snippet(
            "mgmt_instance_index",
            "/mgmt/instances?deleted=false", "GET", 200, "OK",
            lambda client: client.mgmt.instances.index(deleted=False))
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstanceDiagnostics(Example):
    """Admin instance-diagnostics snippet."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_get_instance_diagnostics(self):
        self.snippet(
            "mgmt_instance_diagnostics",
            ("/mgmt/instances/%s/diagnostics" % json_instance.id),
            "GET", 200, "OK",
            lambda client: client.diagnostics.get(json_instance.id))
@test(depends_on=[CreateInstance])
class MgmtInstanceRoot(Example):
    """Admin root-enabled-history snippet."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_get_root_details(self):
        self.snippet(
            "mgmt_get_root_details",
            ("/mgmt/instances/%s/root" % json_instance.id),
            "GET", 200, "OK",
            lambda client: client.mgmt.instances.root_enabled_history(
                json_instance.id)
        )
@test(depends_on=[CreateInstance], enabled=False)
class MgmtInstanceHWInfo(Example):
    """Admin hardware-info snippet (disabled)."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_get_hw_info(self):
        # NOTE(review): write_snippet calls func(client, *func_args), so the
        # extra tuple argument here makes 'id' receive the whole list
        # [json_instance.id] rather than the id itself — possibly why this
        # group is enabled=False; confirm before re-enabling.
        self.snippet(
            "mgmt_get_hw_info",
            ("/mgmt/instances/%s/hwinfo" % json_instance.id),
            "GET", 200, "OK",
            lambda client, id: client.hw_info.get(id),
            ([json_instance.id], ))
@test(depends_on=[CreateInstance], groups=['uses_instances'])
class MgmtInstanceReboot(Example):
    """Admin instance-reboot snippet."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_instance_reboot(self):
        self.snippet(
            "instance_reboot",
            ("/mgmt/instances/%s/action" % json_instance.id),
            "POST", 202, "Accepted",
            lambda client: client.mgmt.instances.reboot(json_instance.id))
@test(depends_on=[CreateInstance],
      groups=['uses_instances'], enabled=False)
class MgmtInstanceGuestUpdate(Example):
    """Admin guest-agent-update snippet (disabled)."""
    @before_class
    def setup(self):
        self.client = make_client(admin_user)
    @test
    def mgmt_instance_guest_update(self):
        self.snippet(
            "guest_update",
            ("/mgmt/instances/%s/action" % json_instance.id),
            "POST", 202, "Accepted",
            lambda client: client.mgmt.instances.update(json_instance.id))
@test(depends_on=[CreateInstance], runs_after_groups=['uses_instances'])
class ZzzDeleteInstance(Example):
    """Teardown group: deletes the shared instance and its configuration."""
    @before_class
    def setup(self):
        self.client = make_client(normal_user)
    @test
    def zzz_delete_instance(self):
        global json_instance
        self.snippet(
            "delete_instance",
            "/instances/%s" % json_instance.id,
            "DELETE", 202, "Accepted",
            lambda client: client.instances.delete(json_instance.id))
        json_instance = self.client.instances.get(json_instance.id)
        assert_equal(json_instance.status, "SHUTDOWN")
    @test(depends_on=[zzz_delete_instance])
    def delete_configuration(self):
        # Uses the configuration recorded in STATE by the Configurations group.
        config = STATE["CONFIGURATION"]
        self.configs = self.snippet(
            "configuration_delete",
            ("/configurations/%s" % config.id),
            "DELETE", 202, "Accepted",
            lambda client: client.configurations.delete(config.id))
if __name__ == "__main__":
    # Standalone entry point: load the localhost test config and run the
    # proboscis test program.
    CONFIG.load_from_file("etc/tests/localhost.test.conf")
    TestProgram().run_and_exit()
# ===========================================================================
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
    # PCA on dense arrays
    pca = PCA(n_components=2)
    X = iris.data
    X_r = pca.fit(X).transform(X)
    np.testing.assert_equal(X_r.shape[1], 2)
    # fit_transform must agree with fit().transform().
    X_r2 = pca.fit_transform(X)
    assert_array_almost_equal(X_r, X_r2)
    # With all components kept, the explained variance ratios sum to 1.
    pca = PCA()
    pca.fit(X)
    assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
    X_r = pca.transform(X)
    X_r2 = pca.fit_transform(X)
    assert_array_almost_equal(X_r, X_r2)
    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        pca.n_components = n_components
        pca.fit(X)
        cov = pca.get_covariance()
        precision = pca.get_precision()
        # Covariance and precision must be (near-)exact inverses.
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
    """Fitting with n_features > n_components must not trigger numpy's
    empty-slice warnings (anything > n_components did so in 0.16)."""
    n_comp = 10
    n_feat = n_comp + 2
    data = np.random.uniform(-1, 1, size=(n_comp, n_feat))
    assert_no_warnings(PCA(n_components=n_comp).fit, data)
def test_whitening():
    # Check that PCA output has unit-variance
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    n_components = 30
    rank = 50

    # some low rank data with correlated features
    X = np.dot(rng.randn(n_samples, rank),
               np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
                      rng.randn(rank, n_features)))

    # Scale the first 50 features by 3, so their standard deviation is ~3x
    # (variance ~9x) that of the remaining 30 features.  (The original
    # comment claimed the *variance* was 3x, which does not match *= 3.)
    X[:, :50] *= 3

    assert_equal(X.shape, (n_samples, n_features))

    # the component-wise variance is thus highly varying:
    assert_almost_equal(X.std(axis=0).std(), 43.9, 1)

    # Exercise both estimators with and without copy-on-fit.
    for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
                           for y in (True, False)]:
        # whiten the data while projecting to the lower dim subspace
        X_ = X.copy()  # make sure we keep an original across iterations.
        pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
        if hasattr(pca, 'random_state'):
            pca.random_state = rng
        # test fit_transform
        X_whitened = pca.fit_transform(X_.copy())
        assert_equal(X_whitened.shape, (n_samples, n_components))
        X_whitened2 = pca.transform(X_)
        assert_array_almost_equal(X_whitened, X_whitened2)

        # Whitened output: unit variance, zero mean per component.
        assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
                            decimal=4)
        assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))

        X_ = X.copy()
        pca = this_PCA(n_components=n_components, whiten=False,
                       copy=copy).fit(X_)
        X_unwhitened = pca.transform(X_)
        assert_equal(X_unwhitened.shape, (n_samples, n_components))

        # in that case the output components still have varying variances
        assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
        # we always center, so no test for non-centering.
def test_explained_variance():
    # Check that PCA and RandomizedPCA agree on explained-variance ratios
    # and that explained_variance_ matches the empirical variance of the
    # projected data.  (The original comment, "Check that PCA output has
    # unit-variance", was copied from test_whitening and was wrong.)
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80

    X = rng.randn(n_samples, n_features)

    pca = PCA(n_components=2).fit(X)
    rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
    # Randomized solver is approximate -> loose (1-decimal) agreement.
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 1)

    # compare to empirical variances
    X_pca = pca.transform(X)
    assert_array_almost_equal(pca.explained_variance_,
                              np.var(X_pca, axis=0))

    X_rpca = rpca.transform(X)
    assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
                              decimal=1)

    # Same with correlated data
    X = datasets.make_classification(n_samples, n_features,
                                     n_informative=n_features-2,
                                     random_state=rng)[0]

    pca = PCA(n_components=2).fit(X)
    rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
    """A held-out point along the dominant direction projects (almost)
    entirely onto the first principal axis."""
    rng = np.random.RandomState(0)
    n_samples, n_feat = 100, 3
    X = rng.randn(n_samples, n_feat) * .1
    X[:10] += np.array([3, 4, 5])          # a shifted sub-population
    Xt = 0.1 * rng.randn(1, n_feat) + np.array([3, 4, 5])

    Yt = PCA(n_components=2).fit(X).transform(Xt)
    Yt = Yt / np.sqrt((Yt ** 2).sum())      # normalise to unit length
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
    """inverse_transform recovers the data when rank ~= n_components."""
    rng = np.random.RandomState(0)
    data = rng.randn(50, 3)     # spherical data
    data[:, 1] *= .00001        # make middle component relatively small
    data += [5, 4, 3]           # make a large mean

    # The signal is essentially rank 2, so projecting to 2 components and
    # back reconstructs the original almost exactly.
    est = PCA(n_components=2).fit(data)
    assert_almost_equal(data, est.inverse_transform(est.transform(data)),
                        decimal=3)

    # Whitening only rescales components, so the round trip still holds.
    est = PCA(n_components=2, whiten=True)
    est.fit(data)
    assert_almost_equal(data, est.inverse_transform(est.transform(data)),
                        decimal=3)
def test_pca_validation():
    """Negative or too-large n_components must raise ValueError."""
    data = [[0, 1], [1, 0]]
    for bad_n in (-1, 3):
        assert_raises(ValueError, PCA(bad_n).fit, data)
def test_randomized_pca_check_projection():
    """RandomizedPCA projects a held-out point onto the dominant axis."""
    rng = np.random.RandomState(0)
    n_samples, n_feat = 100, 3
    X = rng.randn(n_samples, n_feat) * .1
    X[:10] += np.array([3, 4, 5])          # a shifted sub-population
    Xt = 0.1 * rng.randn(1, n_feat) + np.array([3, 4, 5])

    Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
    Yt = Yt / np.sqrt((Yt ** 2).sum())      # normalise to unit length
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
    """RandomizedPCA accepts plain Python lists as input."""
    data = [[1.0, 0.0], [0.0, 1.0]]
    projected = RandomizedPCA(n_components=1,
                              random_state=0).fit(data).transform(data)
    assert_equal(projected.shape, (2, 1))
    assert_almost_equal(projected.mean(), 0.00, 2)
    assert_almost_equal(projected.std(), 0.71, 2)
def test_randomized_pca_inverse():
    """RandomizedPCA round-trips dense data through inverse_transform."""
    rng = np.random.RandomState(0)
    data = rng.randn(50, 3)     # spherical data
    data[:, 1] *= .00001        # make middle component relatively small
    data += [5, 4, 3]           # make a large mean

    # Rank is ~n_components, so reconstruction is near-exact.
    est = RandomizedPCA(n_components=2, random_state=0).fit(data)
    assert_almost_equal(data, est.inverse_transform(est.transform(data)),
                        decimal=2)

    # With whitening the reconstruction is only approximate; bound the
    # worst-case relative error instead of the pointwise values.
    est = RandomizedPCA(n_components=2, whiten=True,
                        random_state=0).fit(data)
    reconstructed = est.inverse_transform(est.transform(data))
    worst_relative = (np.abs(data - reconstructed) / np.abs(data).mean()).max()
    assert_almost_equal(worst_relative, 0.11, decimal=2)
def test_pca_dim():
    """n_components='mle' infers the single strong direction in the data."""
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])   # one shifted sub-population
    est = PCA(n_components='mle').fit(X)
    assert_equal(est.n_components, 'mle')   # parameter left untouched ...
    assert_equal(est.n_components_, 1)      # ... fitted value is inferred
def test_infer_dim_1():
    """Likelihood-based dimension assessment on one-factor data.

    NOTE(review): replaces the old "TODO: explain what this is testing"
    placeholder; intent inferred from the construction below — confirm.
    Data is isotropic noise plus a single shared factor, so the assessed
    log-likelihood at k=1 should be (near) maximal.
    """
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    # Isotropic noise + one rank-1 factor + constant offset.
    X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
         + np.array([1, 0, 7, 4, 6]))
    pca = PCA(n_components=p)
    pca.fit(X)
    spect = pca.explained_variance_
    ll = []
    for k in range(p):
        ll.append(_assess_dimension_(spect, k, n, p))
    ll = np.array(ll)
    # The score at k=1 must be within .01 * n of the global maximum.
    assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
    """_infer_dimension_ finds more than one direction here.

    NOTE(review): replaces the old "TODO: explain what this is testing"
    placeholder; intent inferred from the construction below — confirm.
    Two sample groups are shifted to different off-center locations, so at
    least two meaningful directions exist.
    """
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    pca = PCA(n_components=p)
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
    """With three shifted sub-populations, _infer_dimension_ must report
    more than two meaningful directions."""
    n, p = 100, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
    model = PCA(n_components=p)
    model.fit(X)
    assert_greater(_infer_dimension_(model.explained_variance_, n, p), 2)
def test_infer_dim_by_explained_variance():
    """A float n_components selects the smallest dimensionality whose
    cumulative explained-variance ratio reaches that fraction."""
    X = iris.data
    for target_ratio, expected_dim in ((0.95, 2), (0.01, 1)):
        est = PCA(n_components=target_ratio)
        est.fit(X)
        assert_equal(est.n_components, target_ratio)
        assert_equal(est.n_components_, expected_dim)

    # Also works with more features than samples.
    rng = np.random.RandomState(0)
    est = PCA(n_components=.5).fit(rng.rand(5, 20))
    assert_equal(est.n_components, 0.5)
    assert_equal(est.n_components_, 2)
def test_pca_score():
    """Average log-likelihood matches the analytic entropy of the
    isotropic Gaussian the data was drawn from."""
    n_samples, n_feat = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_feat) * .1 + np.array([3, 4, 5])
    model = PCA(n_components=2)
    model.fit(X)
    ll = model.score(X)
    # Differential entropy of N(mu, 0.1^2 I) in n_feat dimensions.
    h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * n_feat
    np.testing.assert_almost_equal(ll / h, 1, 0)
def test_pca_score2():
    """score() separates the training distribution from a wider one and
    is invariant to whitening."""
    n_samples, n_feat = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_feat) * .1 + np.array([3, 4, 5])
    model = PCA(n_components=2)
    model.fit(X)
    ll_train = model.score(X)
    # Wider (sigma = 0.2) data must score lower under the fitted model.
    ll_other = model.score(rng.randn(n_samples, n_feat) * .2
                           + np.array([3, 4, 5]))
    assert_greater(ll_train, ll_other)

    # Whitening must not change the likelihood of the training data.
    model = PCA(n_components=2, whiten=True)
    model.fit(X)
    assert_almost_equal(ll_train, model.score(X))
def test_pca_score3():
    """Model selection via the probabilistic PCA likelihood.

    Training and held-out sets are drawn from the same one-factor
    generative model, so the held-out score should peak at exactly one
    component.
    """
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k)
        pca.fit(Xl)
        ll[k] = pca.score(Xt)

    # assert_equal reports the actual argmax on failure; the original
    # assert_true(ll.argmax() == 1) only reported "False is not true".
    assert_equal(ll.argmax(), 1)
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import InfinitumTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(InfinitumTestFramework):
    """Exercise wallet backup/restore via backupwallet, dumpwallet and
    importwallet; see the module docstring for the full scenario."""

    def __init__(self):
        super().__init__()
        # Fresh chain each run so the balance arithmetic in run_test is
        # exact (114 mature rewards * 50 coins).
        self.setup_clean_chain = True
        self.num_nodes = 4

    # This mirrors how the network was setup in the bash test
    def setup_network(self, split=False):
        """Start 4 nodes: 0-2 are spenders, 3 is the miner hub."""
        # nodes 1, 2,3 are spenders, let's give them a keypool=100
        extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
        # Star topology around the miner (node 3), plus a 2<->0 link.
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split=False
        self.sync_all()

    def one_send(self, from_node, to_address):
        """With 50% probability, send a random amount in {0.1 .. 1.0}."""
        if (randint(1,2) == 1):
            amount = Decimal(randint(1,10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        """One churn round: each spender may pay each other spender, then
        the miner confirms everything in a single block."""
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()

        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)

        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        sync_mempools(self.nodes)
        self.nodes[3].generate(1)

    # As above, this mirrors the original bash test.
    def start_three(self):
        """Restart the three spender nodes and re-wire the topology."""
        self.nodes[0] = start_node(0, self.options.tmpdir)
        self.nodes[1] = start_node(1, self.options.tmpdir)
        self.nodes[2] = start_node(2, self.options.tmpdir)

        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)

    def stop_three(self):
        """Shut down the three spender nodes (the miner keeps running)."""
        stop_node(self.nodes[0], 0)
        stop_node(self.nodes[1], 1)
        stop_node(self.nodes[2], 2)

    def erase_three(self):
        """Delete the spenders' wallet files so each restore starts clean."""
        os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")

    def run_test(self):
        """Drive the full backup/restore scenario described in the module
        docstring: churn, back up, churn more, then restore twice."""
        logging.info("Generating initial blockchain")
        # One block each for the spenders, then 100 from the miner so the
        # spenders' rewards mature (50 spendable coins apiece).
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        self.nodes[2].generate(1)
        sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)

        logging.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()

        logging.info("Backing up")
        tmpdir = self.options.tmpdir
        # Take both kinds of backup per spender: binary wallet.dat copy
        # (backupwallet) and key dump (dumpwallet).
        self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
        self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
        self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")

        logging.info("More transactions")
        # These rounds happen AFTER the backups, exercising recovery of
        # later transactions from the chain rather than the wallet file.
        for i in range(5):
            self.do_one_round()

        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].generate(101)
        self.sync_all()

        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3

        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)

        ##
        # Test restoring spender wallets from backups
        ##
        logging.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()

        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        # Restore wallets from backup
        shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")

        logging.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)

        logging.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()

        #start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")

        self.start_three()

        # Fresh wallets know nothing yet.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")

        sync_blocks(self.nodes)

        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
    # Functional-test entry point; main() is provided by the framework.
    WalletBackupTest().main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import atexit
import os
import signal
import shlex
import shutil
import socket
import platform
import tempfile
import time
from subprocess import Popen, PIPE
from py4j.java_gateway import java_import, JavaGateway, JavaObject, GatewayParameters
from py4j.clientserver import ClientServer, JavaParameters, PythonParameters
from pyspark.find_spark_home import _find_spark_home
from pyspark.serializers import read_int, write_with_length, UTF8Deserializer
def launch_gateway(conf=None, popen_kwargs=None):
    """
    launch jvm gateway

    Spawns (or attaches to) the Py4J JVM used by PySpark and returns a
    connected client with the common Spark classes imported.

    Parameters
    ----------
    conf : :py:class:`pyspark.SparkConf`
        spark configuration passed to spark-submit
    popen_kwargs : dict
        Dictionary of kwargs to pass to Popen when spawning
        the py4j JVM. This is a developer feature intended for use in
        customizing how pyspark interacts with the py4j JVM (e.g., capturing
        stdout/stderr).

    Returns
    -------
    ClientServer or JavaGateway
    """
    if "PYSPARK_GATEWAY_PORT" in os.environ:
        # An external launcher already started the JVM and told us where
        # to connect; we own no child process in this case.
        gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
        gateway_secret = os.environ["PYSPARK_GATEWAY_SECRET"]
        # Process already exists
        proc = None
    else:
        SPARK_HOME = _find_spark_home()
        # Launch the Py4j gateway using Spark's run command so that we pick up the
        # proper classpath and settings from spark-env.sh
        on_windows = platform.system() == "Windows"
        script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
        command = [os.path.join(SPARK_HOME, script)]
        if conf:
            for k, v in conf.getAll():
                command += ['--conf', '%s=%s' % (k, v)]
        submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
        if os.environ.get("SPARK_TESTING"):
            submit_args = ' '.join([
                "--conf spark.ui.enabled=false",
                submit_args
            ])
        command = command + shlex.split(submit_args)

        # Create a temporary directory where the gateway server should write the connection
        # information.
        conn_info_dir = tempfile.mkdtemp()
        try:
            # Reserve a unique path, then delete the file: the JVM
            # recreates it once it knows its port and secret.
            fd, conn_info_file = tempfile.mkstemp(dir=conn_info_dir)
            os.close(fd)
            os.unlink(conn_info_file)

            env = dict(os.environ)
            env["_PYSPARK_DRIVER_CONN_INFO_PATH"] = conn_info_file

            # Launch the Java gateway.
            popen_kwargs = {} if popen_kwargs is None else popen_kwargs
            # We open a pipe to stdin so that the Java gateway can die when the pipe is broken
            popen_kwargs['stdin'] = PIPE
            # We always set the necessary environment variables.
            popen_kwargs['env'] = env
            if not on_windows:
                # Don't send ctrl-c / SIGINT to the Java gateway:
                def preexec_func():
                    signal.signal(signal.SIGINT, signal.SIG_IGN)
                popen_kwargs['preexec_fn'] = preexec_func
                proc = Popen(command, **popen_kwargs)
            else:
                # preexec_fn not supported on Windows
                proc = Popen(command, **popen_kwargs)

            # Wait for the file to appear, or for the process to exit, whichever
            # happens first.
            # NOTE(review): `not proc.poll()` is also true for exit code 0, so
            # a JVM that exits *cleanly* without writing the file would keep
            # this loop spinning — confirm whether that is intentional.
            while not proc.poll() and not os.path.isfile(conn_info_file):
                time.sleep(0.1)

            if not os.path.isfile(conn_info_file):
                raise RuntimeError("Java gateway process exited before sending its port number")

            # The JVM writes its ephemeral port and auth secret to the file.
            with open(conn_info_file, "rb") as info:
                gateway_port = read_int(info)
                gateway_secret = UTF8Deserializer().loads(info)
        finally:
            shutil.rmtree(conn_info_dir)

        # In Windows, ensure the Java child processes do not linger after Python has exited.
        # In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when
        # the parent process' stdin sends an EOF). In Windows, however, this is not possible
        # because java.lang.Process reads directly from the parent process' stdin, contending
        # with any opportunity to read an EOF from the parent. Note that this is only best
        # effort and will not take effect if the python process is violently terminated.
        if on_windows:
            # In Windows, the child process here is "spark-submit.cmd", not the JVM itself
            # (because the UNIX "exec" command is not available). This means we cannot simply
            # call proc.kill(), which kills only the "spark-submit.cmd" process but not the
            # JVMs. Instead, we use "taskkill" with the tree-kill option "/t" to terminate all
            # child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)
            def killChild():
                Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(proc.pid)])
            atexit.register(killChild)

    # Connect to the gateway (or client server to pin the thread between JVM and Python)
    if os.environ.get("PYSPARK_PIN_THREAD", "true").lower() == "true":
        gateway = ClientServer(
            java_parameters=JavaParameters(
                port=gateway_port,
                auth_token=gateway_secret,
                auto_convert=True),
            python_parameters=PythonParameters(
                port=0,
                eager_load=False))
    else:
        gateway = JavaGateway(
            gateway_parameters=GatewayParameters(
                port=gateway_port,
                auth_token=gateway_secret,
                auto_convert=True))

    # Store a reference to the Popen object for use by the caller (e.g., in reading stdout/stderr)
    gateway.proc = proc

    # Import the classes used by PySpark
    java_import(gateway.jvm, "org.apache.spark.SparkConf")
    java_import(gateway.jvm, "org.apache.spark.api.java.*")
    java_import(gateway.jvm, "org.apache.spark.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.ml.python.*")
    java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.resource.*")
    # TODO(davies): move into sql
    java_import(gateway.jvm, "org.apache.spark.sql.*")
    java_import(gateway.jvm, "org.apache.spark.sql.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
    java_import(gateway.jvm, "scala.Tuple2")

    return gateway
def _do_server_auth(conn, auth_secret):
    """
    Performs the authentication protocol defined by the SocketAuthHelper class on the given
    file-like object 'conn'.

    Sends the shared secret and expects the literal reply "ok"; on any
    other reply the connection is closed and RuntimeError is raised.
    """
    write_with_length(auth_secret.encode("utf-8"), conn)
    conn.flush()
    response = UTF8Deserializer().loads(conn)
    if response == "ok":
        return
    conn.close()
    raise RuntimeError("Unexpected reply from iterator server.")
def local_connect_and_auth(port, auth_secret):
    """
    Connect to local host, authenticate with it, and return a (sockfile,sock) for that connection.
    Handles IPV4 & IPV6, does some error handling.

    Parameters
    ----------
    port : str or int or None
    auth_secret : str

    Returns
    -------
    tuple
        with (sockfile, sock)
    """
    sock = None
    failures = []
    # Try every address family getaddrinfo offers for localhost; on most
    # IPv6-ready systems IPv6 takes precedence.
    candidates = socket.getaddrinfo("127.0.0.1", port, socket.AF_UNSPEC,
                                    socket.SOCK_STREAM)
    for family, socktype, proto, _, addr in candidates:
        try:
            sock = socket.socket(family, socktype, proto)
            sock.settimeout(int(os.environ.get("SPARK_AUTH_SOCKET_TIMEOUT", 15)))
            sock.connect(addr)
            sockfile = sock.makefile("rwb", int(os.environ.get("SPARK_BUFFER_SIZE", 65536)))
            _do_server_auth(sockfile, auth_secret)
            return (sockfile, sock)
        except socket.error as exc:
            failures.append("tried to connect to %s, but an error occurred: %s" % (addr, str(exc)))
            sock.close()
            sock = None
    raise RuntimeError("could not open socket: %s" % failures)
def ensure_callback_server_started(gw):
    """
    Start callback server if not already started. The callback server is needed if the Java
    driver process needs to callback into the Python driver process to execute Python code.
    """
    # getattr will fallback to JVM, so we cannot test by hasattr()
    if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
        # Let the OS pick a free port (port=0); run daemonized so the
        # callback server never blocks interpreter shutdown.
        gw.callback_server_parameters.eager_load = True
        gw.callback_server_parameters.daemonize = True
        gw.callback_server_parameters.daemonize_connections = True
        gw.callback_server_parameters.port = 0
        gw.start_callback_server(gw.callback_server_parameters)
        cbport = gw._callback_server.server_socket.getsockname()[1]
        gw._callback_server.port = cbport
        # gateway with real port
        gw._python_proxy_port = gw._callback_server.port
        # get the GatewayServer object in JVM by ID
        jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
        # update the port of CallbackClient with real port
        jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port)
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # Each assignment below must stay on a line of its own: git rewrites
    # the "$Format:...$" literals during 'git archive', and
    # setup.py/versioneer.py grep for these exact variable names.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    A plain attribute bag; see get_config() for the fields that get set.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' writes
    # _version.py; for this project they are effectively constants.
    cfg = VersioneerConfig()
    for attr, value in (("VCS", "git"),
                        ("style", "pep440"),
                        ("tag_prefix", "v"),
                        ("parentdir_prefix", "processors-"),
                        ("versionfile_source", "processors/_version.py"),
                        ("verbose", False)):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow between version-discovery strategies: each
    strategy raises it so the caller can fall through to the next one.
    """
# Registry of per-VCS implementation functions, keyed HANDLERS[vcs][method]
# and populated by the decorator below.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Try each executable name in *commands* until one launches.

    Runs the first launchable command with *args* appended and returns
    (stdout, returncode).  Returns (None, None) when no command could be
    started, and (None, returncode) when the command ran but exited
    non-zero.  *cwd* and *env* are forwarded to subprocess.Popen;
    *hide_stderr* discards stderr; *verbose* prints diagnostics.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        dispcmd = str([c] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError as e:
            # 'except ... as e' replaces the archaic sys.exc_info()[1]
            # capture; behavior is identical on all supported interpreters.
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory named
    '<project>-<version>'.  Walk up to two levels above *root* looking for
    a directory name starting with *parentdir_prefix*, and take the rest
    of that name as the version.
    """
    tried = []
    level = 0
    while level < 3:
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
        level += 1
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # The code embedded in _version.py can simply evaluate these keywords;
    # from setup.py we must not import _version.py, so scrape the values
    # with regexps instead.  (This function is not used from _version.py.)
    prefixes = {"git_refnames =": "refnames",
                "git_full =": "full",
                "git_date =": "date"}
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for prefix, key in prefixes.items():
                if stripped.startswith(prefix):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # Missing/unreadable file: return whatever (possibly nothing)
        # was collected, exactly like the original.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by get_keywords()/git_get_keywords();
    *tag_prefix* (e.g. "v") is stripped from candidate tags.  Returns the
    standard versioneer result dict, or raises NotThisMethod when the
    keywords were never expanded (i.e. not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Literal "$Format:...$" means git-archive never ran on this tree.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, closest-tag, distance,
    dirty, error, date.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False on Windows needs the .cmd/.exe wrappers.
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        # NOTE(review): count_out is not checked for None; if
        # 'git rev-list' fails this raises TypeError rather than
        # NotThisMethod — presumably acceptable here, confirm.
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces
def plus_or_dot(pieces):
    """Return '.' when the closest tag already contains a '+', else '+'.

    PEP 440 local version labels start with a single '+'; any further
    separators inside the label must be '.'.
    """
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build a PEP 440 version string with a post-release local label.

    Our goal: TAG[+DISTANCE.gHEX[.dirty]].  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in the history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
    else:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += "%s%d.g%s" % (plus_or_dot(pieces),
                                     pieces["distance"], pieces["short"])
    # A dirty tree always gets the trailing marker (dirty implies the
    # local-label branch was taken above, so this flattening is safe).
    if pieces["dirty"]:
        version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- no -dirty marker.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing tagged yet
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # exactly on the tag
        return tag
    return "%s.post.dev%d" % (tag, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" marks a dirty tree.  Note that .dev0 sorts backwards (a
    dirty tree appears "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += "+g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" suffix marks a dirty working tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    post_needed = bool(pieces["distance"] or pieces["dirty"])
    if tag:
        version = tag + (".post%d" % pieces["distance"] if post_needed else "")
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
    # dirty implies post_needed, so a single trailing check is equivalent
    # to the nested form.
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Mirrors 'git describe --tags --dirty --always': the distance/hash
    part is omitted for an exactly-tagged commit.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: fall back to the bare short hash
        version = pieces["short"]
    elif pieces["distance"]:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = tag
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Mirrors 'git describe --tags --dirty --always -long': the
    distance/hash part is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style.

    Returns a dict with "version", "full-revisionid", "dirty", "error"
    and "date" keys.  An upstream error short-circuits to an "unknown"
    version.  Raises ValueError for an unrecognized style name.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # Dispatch table instead of an if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    return {"version": renderers[style](pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded VCS keywords (works from an exported tarball).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # Frozen interpreters (py2exe/bbfreeze/...) may not define __file__.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git itself via 'git describe' etc.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # Every strategy failed.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| |
"""Functions to plot a vega plot from Extremefill data
"""
# pylint: disable=no-value-for-parameter
import os
import uuid
import numpy as np
from skimage import measure
import xarray
# pylint: disable=redefined-builtin, no-name-in-module
from toolz.curried import pipe, juxt, valmap, concat, map, do
from scipy.interpolate import griddata
import pandas
import yaml
import vega
from IPython.display import display, publish_display_data
from .tools import tlam, enum, render_yaml, get_path, all_files, render_j2
def vega_plot_treant(treant):
    """Make a vega plot

    Args:
      treant: a treant

    Returns:
      a vega.Vega type

    >>> from click.testing import CliRunner
    >>> from extremefill2D.fextreme import init_sim
    >>> from extremefill2D.fextreme.tools import base_path
    >>> with CliRunner().isolated_filesystem() as dir_:
    ...     assert pipe(
    ...         os.path.join(base_path(), 'scripts', 'params.json'),
    ...         init_sim(data_path=dir_),
    ...         vega_plot_treant,
    ...         lambda x: type(x) is vega.Vega)
    """
    # A single treant is the degenerate case of plotting several treants
    # on one set of axes.
    return vega_plot_treants_together([treant])
def vega_plot_treants_together(treants):
    """Make a vega plot with multiple treants

    All treants are rendered into a single Vega spec (one set of axes).

    Args:
      treants: a list of treants

    Returns:
      a vega.Vega type
    """
    return vega.Vega(render_spec(treants))
def vega_plot_treants(treants):
    """Make a vega plot with side-by-side plots

    Each treant is rendered into its own independent Vega spec.

    Args:
      treants: a list of treants

    Returns:
      a MultiVega instance

    >>> from click.testing import CliRunner
    >>> from extremefill2D.fextreme import init_sim
    >>> from extremefill2D.fextreme.tools import base_path
    >>> with CliRunner().isolated_filesystem() as dir_:
    ...     assert pipe(
    ...         os.path.join(base_path(), 'scripts', 'params.json'),
    ...         init_sim(data_path=dir_),
    ...         lambda x: [x, x],
    ...         vega_plot_treants,
    ...         lambda x: type(x) is MultiVega)
    """
    # One spec per treant, collected eagerly and handed to MultiVega.
    return MultiVega([render_spec([treant]) for treant in treants])
def render_spec(treants):
    """Turn a list of Extremefill treants into a single Vega plot

    Args:
      treants: a list of Extremefill treants

    Returns:
      a list of vega specs
    """
    return pipe(
        treants,
        # contour data sets per treant, numbered so names stay unique
        enum(lambda i, x: vega_contours(x, counter=i)),
        concat,
        list,
        # splice the data into the Jinja2 Vega template; the plot title is
        # the first treant's short uuid
        lambda x: render_yaml(os.path.join(get_path(__file__),
                                           'templates',
                                           'vega.yaml.j2'),
                              data=dict(data=x, title=treants[0].uuid[:8])),
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input; the spec comes from a local
        # template here, but consider yaml.safe_load.
        yaml.load
    )
def vega_contours(treant, counter=0):
    """
    Get the contours as Vega data.

    Args:
      treant: a Treant object with data files
      counter: integer suffix keeping data names unique across treants

    Returns:
      contours formatted as Vega data (dicts with "name"/"values" keys)
    """
    return pipe(
        treant,
        all_files('*.nc'),                 # every netcdf result file
        map(contours_from_datafile),       # -> lists of (N, 2) arrays
        concat,                            # flatten to one contour list
        map(pandas.DataFrame),
        map(lambda x: x.rename(columns={0: 'x', 1: 'y'})),
        map(lambda x: x.to_dict(orient='records')),
        map(map(valmap(float))),           # plain floats for JSON output
        map(list),
        enum(lambda i, x: dict(name='contour_data{0}_{1}'.format(i, counter),
                               values=x)),
    )
def contours_from_datafile(datafile):
    """Calculate the contours given a netcdf datafile

    Args:
      datafile: the netcdf datafile

    Returns:
      a list of contours
    """
    return pipe(
        datafile,
        xarray.open_dataset,
        # pull the raw coordinate / level-set arrays out of the dataset
        lambda x: dict(x=x.x.values,
                       y=x.y.values,
                       z=x.distance.values,
                       dx=x.nominal_dx),
        contours
    )
def contours(data):
    """Get zero contours from x, y, z data

    Args:
      data: dictionary with (x, y, z, dx) keys

    Returns:
      a list of (N, 2) numpy arrays representing the contours
    """
    def linspace_(arr, spacing):
        """Calculate the linspace over arr's range with the given spacing."""
        return pipe(
            arr,
            juxt(min, max),
            # np.linspace requires an integer sample count (a float num
            # raises TypeError on NumPy >= 1.18), so truncate explicitly;
            # older NumPy silently truncated the same way.
            tlam(lambda x_, y_: np.linspace(x_, y_,
                                            int((y_ - x_) / spacing)))
        )
    return pipe(
        data,
        lambda x: dict(xi=linspace_(x['x'], x['dx']),
                       yi=linspace_(x['y'], x['dx']),
                       **x),
        # resample the scattered (y, x) -> z samples onto the regular grid
        lambda x: griddata((x['y'], x['x']),
                           x['z'],
                           (x['yi'][None, :], x['xi'][:, None]),
                           method='cubic'),
        # zero level set of the interpolated distance function
        lambda x: measure.find_contours(x, 0.0),
        # find_contours returns pixel indices; rescale to physical units
        map(lambda x: float(data['dx']) * x)
    )
def render_html(ids):
    """Render the HTML for the IPython Vega plots.

    Args:
      ids: the tags for each div element

    Returns:
      the rendered HTML
    """
    # One <div> per id, laid out by the multivega Jinja2 template.
    return render_j2(os.path.join(get_path(__file__),
                                  'templates/multivega.html.j2'),
                     dict(ids=ids),
                     dict())
def html_publish_map(data):
    """Run IPython's 'publish_display_data' for each spec.

    Args:
      data: list of (id, spec) pairings
    """
    pipe(
        data,
        map(lambda x: x[0]),  # keep only the div ids
        list,
        # One publish call covering all divs at once.
        lambda x: publish_display_data(
            {'text/html': render_html(x)},
            # NOTE(review): x is the full id list here, so the metadata
            # anchor references only the first id -- confirm this is
            # intentional rather than a per-id publish.
            metadata={'jupyter-vega': '#{0}'.format(x[0])})
    )
def js_publish(id_, inst):
    """Generate Vega JS

    Publishes the JavaScript that renders one Vega plot into the div
    tagged with id_.

    Args:
      id_: a unique ID to tag the element
      inst: a Vega instance
    """
    publish_display_data(
        # pylint: disable=protected-access
        {'application/javascript': inst._generate_js(id_)},
        metadata={'jupyter-vega': '#{0}'.format(id_)}
    )
def ipython_display(specs):
    """Run publish_display_data for the JS and HTML

    Args:
      specs: a list of Vega specs
    """
    pipe(
        specs,
        map(lambda x: (uuid.uuid4(), vega.Vega(x))),  # unique div id per spec
        list,
        do(html_publish_map),   # publish the placeholder divs first
        map(tlam(js_publish)),  # then the JS that fills each div
        list                    # force evaluation of the lazy map
    )
class MultiVega(object):  # pylint: disable=too-few-public-methods
    """Side-by-side vega plots

    >>> from click.testing import CliRunner
    >>> from extremefill2D.fextreme import init_sim
    >>> from extremefill2D.fextreme.tools import base_path
    >>> with CliRunner().isolated_filesystem() as dir_:
    ...     inst = pipe(
    ...         os.path.join(base_path(), 'scripts', 'params.json'),
    ...         init_sim(data_path=dir_),
    ...         lambda x: [x, x],
    ...         vega_plot_treants,
    ...         do(lambda x: x._ipython_display_())
    ...     )
    ...     inst.display()
    """
    def __init__(self, specs):
        """Args:
          specs: a list of Vega specs, one per side-by-side plot
        """
        # raw Vega JSON specs rendered side by side
        self.specs = specs
    def _ipython_display_(self):
        # Hook used by IPython's rich-display machinery.
        ipython_display(self.specs)
    def display(self):
        """Display in IPython Notebook.
        """
        display(self)
| |
"""
Simulate interlaced spectra.
"""
import os
import glob
from pylab import cm
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
#import pyfits
import astropy.io.fits as pyfits
import unicorn
import unicorn.interlace_fit
import unicorn.utils_c as utils_c
import threedhst
def sim_all():
    """
    Run in GRISM_HOME / SIMULATIONS, loop through all pointings and run the
    spectra simulation.
    """
    import glob
    import unicorn
    import unicorn.intersim
    # One simulation per interlaced G141 image; the pointing root is
    # everything before the '-G141' suffix.
    for interlaced in glob.glob('*G141_inter.fits'):
        unicorn.intersim.simspec(root=interlaced.split('-G141')[0])
def simspec(root='COSMOS-19'):
    """
    Root is the base image where the noise and direct images come from.

    Builds a simulated grism image where every bright object carries the
    same emission-line template spectrum, then extracts and fits each
    spectrum (redshift + emission lines) and prints the recovered vs.
    input line fluxes.
    """
    #### Simple model of a gaussian Ha emission line
    xflux = np.arange(1.e4,1.8e4)
    dv = 100 # km/s
    z0 = 1.0
    l0 = 6564.61*(1+z0)
    dlam = dv/3.e5*l0
    yline = 1./np.sqrt(2*np.pi*dlam**2)*np.exp(-(xflux-l0)**2/2/dlam**2)
    ### Use template emission lines rather than a single gaussian
    ### (this overwrites the gaussian model built above)
    xflux, yline = np.loadtxt(unicorn.GRISM_HOME+'/templates/dobos11/SF0_0.emline.txt', unpack=True)
    xflux *= (1+z0)
    #### Add continuum, here with level 0.1*max(line)
    ycont = yline.max()*0.1
    yflux = ycont+yline
    #### Normalize to F140W passband
    x_filt, y_filt = np.loadtxt(os.getenv('iref')+'/F140W.dat', unpack=True)
    y_filt_int = utils_c.interp_c(xflux, x_filt, y_filt)
    filt_norm = np.trapz(y_filt_int*yflux, xflux) / np.trapz(y_filt_int, xflux)
    yflux /= filt_norm
    yline /= filt_norm
    ycont /= filt_norm
    ids = [290]
    model = unicorn.reduce.GrismModel(root)
    # simulate every object brighter than mag 24 (overrides the test id)
    ids = model.cat.id[model.cat.mag < 24]
    #ids = [245]
    #### Generate model where every spectrum is the line template but the mag/shape of the galaxies
    #### is as observed
    for i,id in enumerate(ids):
        print unicorn.noNewLine+'%d (%d/%d)' %(id, i+1, len(ids))
        model.compute_object_model(id, lam_spec=xflux, flux_spec=yflux)
        model.model += model.object
    #### Get error array from the error extension
    err = np.random.normal(size=model.model.shape)*model.gris[2].data
    mask = (err != 0) & (model.segm[0].data == 0)
    #### Compare background flux distributions
    #plt.hist(model.gris[1].data[mask].flatten(), range=(-0.1,0.1), bins=100, alpha=0.5)
    #plt.hist(err[mask].flatten(), range=(-0.1,0.1), bins=100, alpha=0.5)
    #### Store the new model in the grism image data extension so that we can fit it with the
    #### various tools (z, line strength, etc)
    #old = model.gris[1].data*1.
    model.gris[1].data = model.model*(err != 0) + err
    model.get_corrected_wcs(verbose=True)
    model.init_object_spectra()
    # reset the model so per-object extraction starts clean
    model.model*=0
    ##### Try extracting a spectrum and fitting it
    #id=685
    #id=343
    #id=ids[0]
    for id in ids:
        obj='%s_%05d' %(root, id)
        print '%s.linefit.png' %(obj)
        # skip objects already fit on a previous run
        if os.path.exists('%s.linefit.png' %(obj)):
            print 'skip'
            continue
        flam = np.sum(model.flux[model.segm[0].data == id])
        fnu = np.sum(model.flux_fnu*(model.segm[0].data == id))
        ### *Input* line flux, should be able to get this directly from the input spectrum and the
        ### observed magnitude, but check units.
        #plt.plot(xflux, yflux/filt_norm*flam*1.e-17)
        ha = np.abs(xflux-6564*(1+z0)) < 100
        ha_flux = np.trapz(yline[ha]*flam*1.e-17, xflux[ha])
        ha_eqw = np.trapz(yline[ha]/ycont, xflux[ha])
        s2 = np.abs(xflux-6731*(1+z0)) < 100
        s2_flux = np.trapz(yline[s2]*flam*1.e-17, xflux[s2])
        s2_eqw = np.trapz(yline[s2]/ycont, xflux[s2])
        model.twod_spectrum(id, refine=True, verbose=True)
        if not model.twod_status:
            continue
        model.show_2d(savePNG=True)
        spec = unicorn.reduce.Interlace1D(root+'_%05d.1D.fits' %(id), PNG=True)
        #### Redshift fit, set template to flat and the redshift prior to a broad gaussian centered
        #### on the input value, z0
        zgrid = np.arange(0,4,0.005)
        pz = np.exp(-(zgrid-z0)**2/2/0.5**2)
        lnprob = np.log(pz)
        gris = unicorn.interlace_fit.GrismSpectrumFit(root=obj, lowz_thresh=0.01, FIGURE_FORMAT='png')
        if not gris.status:
            continue
        # force the photo-z information to the known input redshift
        gris.zout.z_spec = gris.zout.z_spec*0.+z0
        gris.zout.l99 = gris.zout.l99*0.+z0-0.1
        gris.zout.u99 = gris.zout.l99+0.2
        gris.z_peak = 1
        gris.best_fit = gris.best_fit*0+1
        gris.phot_zgrid = zgrid
        gris.phot_lnprob = lnprob
        try:
            gris.fit_in_steps(dzfirst=0.005, dzsecond=0.0005, zrfirst=(z0-0.2,z0+0.2))
        except:
            continue
        if not gris.status:
            continue
        #### Emission line fit
        try:
            gris.fit_free_emlines(ztry=gris.z_max_spec, verbose=True, NTHREADS=1, NWALKERS=50, NSTEP=100, FIT_REDSHIFT=False, FIT_WIDTH=False, line_width0=100)
        except:
            continue
        status = os.system('cat %s.linefit.dat' %(obj))
        print '\n -- input --\nSII %6.2f %6.2f' %(s2_flux/1.e-17, s2_eqw)
        print ' Ha %6.2f %6.2f' %(ha_flux/1.e-17, ha_eqw)
def get_results(force_new=False):
    """
    Collate the results from the simulated spectra and the input catalogs into single output
    catalogs suitable for reading and plotting.

    for field in ['AEGIS','COSMOS','UDS','GOODS-S']:
        os.chdir(unicorn.GRISM_HOME+'%s/PREP_FLT' %(field))
        unicorn.intersim.get_results()

    os.chdir(unicorn.GRISM_HOME+'SIMULATIONS')
    status = os.system('cat ../AEGIS/PREP_FLT/simspec.dat ../COSMOS/PREP_FLT/simspec.dat ../GOODS-S/PREP_FLT/simspec.dat ../UDS/PREP_FLT/simspec.dat > all_simspec.dat')
    """
    import threedhst.catIO as catIO
    files=glob.glob('*linefit.dat')
    # SExtractor catalog for the current pointing, cached between objects
    cat = None
    # (re)create the output catalog header when missing or forced
    if (not os.path.exists('simspec.dat')) | force_new:
        fp = open('simspec.dat','w')
        fp.write('# object sky_avg sky_lo sky_hi mag r50 r90 z_fit continuum_sn ha_flux ha_flux_err ha_eqw ha_eq_err s2_flux s2_flux_err s2_eqw s2_eq_err\n')
        fp.write('dummy 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n')
        fp.close()
    log = catIO.Readfile('simspec.dat')
    for ii, file in enumerate(files):
        root = file.split('.linefit')[0]
        print unicorn.noNewLine+'%s (%d/%d)' %(root, ii+1, len(files))
        # skip objects already in the log
        if root in log.object:
            continue
        #
        fp = open('simspec.dat','a')
        pointing = root.split('_')[0]
        id = int(root.split('_')[1])
        if cat is None:
            cat = threedhst.sex.mySexCat(pointing+'_inter.cat')
            ### Get sky background
            asn = threedhst.utils.ASNFile(pointing+'-G141_asn.fits')
            bg = []
            for exp in asn.exposures:
                flt = pyfits.open(exp+'_flt.fits')
                bg.append(flt[0].header['SKYSCALE'])
            #
            bg_avg = np.mean(bg)
            bg_lo = np.min(bg)
            bg_hi = np.max(bg)
        else:
            # re-read the catalog and sky levels only when the pointing
            # changes; otherwise reuse the cached values
            if not cat.filename.startswith(pointing+'-'):
                cat = threedhst.sex.mySexCat(pointing+'_inter.cat')
                asn = threedhst.utils.ASNFile(pointing+'-G141_asn.fits')
                bg = []
                for exp in asn.exposures:
                    flt = pyfits.open(exp+'_flt.fits')
                    bg.append(flt[0].header['SKYSCALE'])
                #
                bg_avg = np.mean(bg)
                bg_lo = np.min(bg)
                bg_hi = np.max(bg)
        #
        gris = unicorn.interlace_fit.GrismSpectrumFit(root, verbose=False)
        if not gris.status:
            fp.close()
            continue
        #
        result = gris.stats()
        if result is False:
            fp.close()
            continue
        #
        DIRECT_MAG, Q_Z, F_COVER, F_FLAGGED, MAX_CONTAM, INT_CONTAM, F_NEGATIVE = result
        #
        # continuum S/N measured in a clean spectral window, rejecting
        # poorly covered or heavily contaminated spectra
        lwindow = (gris.oned.data.wave > 1.4e4) & (gris.oned.data.wave < 1.6e4)
        if (lwindow.sum() < 10) | (INT_CONTAM > 0.3):
            fp.close()
            continue
        #
        continuum_sn = np.median((gris.oned.data.flux/gris.oned.data.error)[lwindow])
        #
        lfit = catIO.Readfile(root+'.linefit.dat')
        if lfit.status is None:
            fp.close()
            continue
        #
        # fitted Ha line parameters, -1 placeholders when not detected
        if 'Ha' in lfit.line:
            ix = np.arange(len(lfit.line))[lfit.line == 'Ha'][0]
            ha_flux, ha_flux_err, ha_eqw, ha_eqw_err = lfit.flux[ix], lfit.error[ix], lfit.eqw_obs[ix], lfit.eqw_obs_err[ix]
        else:
            ha_flux, ha_flux_err, ha_eqw, ha_eqw_err = -1,-1,-1,-1
        #
        # fitted [SII] line parameters, -1 placeholders when not detected
        if 'SII' in lfit.line:
            ix = np.arange(len(lfit.line))[lfit.line == 'SII'][0]
            s2_flux, s2_flux_err, s2_eqw, s2_eqw_err = lfit.flux[ix], lfit.error[ix], lfit.eqw_obs[ix], lfit.eqw_obs_err[ix]
        else:
            s2_flux, s2_flux_err, s2_eqw, s2_eqw_err = -1,-1,-1,-1
        #
        ic = np.arange(cat.nrows)[cat.id == id][0]
        fp.write(' %s %5.2f %5.2f %5.2f %6.3f %6.2f %6.2f %6.4f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f\n' %(root, bg_avg, bg_lo, bg_hi, DIRECT_MAG, float(cat.FLUX_RADIUS[ic]), float(cat.FLUX_RADIUS2[ic]), gris.z_max_spec, continuum_sn, ha_flux, ha_flux_err, ha_eqw, ha_eqw_err, s2_flux, s2_flux_err, s2_eqw, s2_eqw_err))
        #
        fp.close()
def show_results(use_tex=False):
    """Plot grism sensitivity trends from the collated 'all_simspec.dat'.

    Derives multiplicative correction functions (magnitude, size, sky
    background, concentration) for the continuum and line S/N and saves
    'grism_cont_sensitivity.pdf' and 'grism_line_sensitivity.pdf'.

    Args:
        use_tex: render axis labels with TeX-style formatting.
    """
    import threedhst.catIO as catIO
    stats = catIO.Readfile('all_simspec.dat')
    ha_model, s2_model = unicorn.intersim.get_line_fluxes(z0=1.0, mag=stats.mag)
    #### Star/galaxy separation from the magnitude-size relation
    xstar = [14.5, 24.1]
    ystar = [3.00, 2.13]
    yi = np.interp(stats.mag, xstar, ystar)
    #plt.scatter(stats.mag, yi, s=0.1, color='black')
    is_star = stats.r50 < yi
    plt.scatter(stats.mag[is_star], stats.r50[is_star], alpha=0.5)
    plt.scatter(stats.mag[~is_star], stats.r50[~is_star], alpha=0.2, color='red')
    #### Color by r50/r90 concentration
    concentration = stats.r50/stats.r90
    msize = np.maximum((concentration/0.2)**4,4)
    mcol = np.minimum((np.maximum(concentration,0.3)-0.3)/0.2,1)
    plt.scatter(stats.mag, concentration, c=mcol, alpha=0.5)
    mcol = np.minimum(np.log10(stats.r50-1.1),1)
    # jitter the sky values slightly so overlapping points separate
    stats.sky_avg += np.random.normal(size=stats.sky_avg.shape)*0.01
    sky_col = np.minimum((stats.sky_avg - 0.8)/0.8,1)
    plt.scatter(stats.mag, stats.sky_avg, c=sky_col, alpha=0.5)
    #### Continuum depth
    BINWIDTH=92
    bin_sn = np.sqrt(BINWIDTH/22)
    binned = stats.continuum_sn*bin_sn
    #### Get correction functions
    xm, ym, ys, nn = threedhst.utils.runmed(stats.mag, binned, NBIN=80)
    ymag = np.interp(stats.mag, xm, ym)
    sub = (stats.mag > 19) & (stats.mag < 22.5) & (stats.continuum_sn > 0) & (stats.ha_flux > 0) #& (~is_star)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub], (binned/ymag)[sub], NBIN=20)
    ysize = np.interp(stats.r50, xm, ym)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (binned/ymag/ysize)[sub], NBIN=25)
    ysky = np.interp(stats.sky_avg, xm, ym)
    xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (binned/ymag/ysize/ysky)[sub], NBIN=25)
    ycons = np.interp(concentration, xm, ym)
    #### Four-panel continuum sensitivity figure
    fig = unicorn.catalogs.plot_init(xs=8, aspect=1./4, left=0.07, use_tex=use_tex)
    #fig.subplots_adjust(wspace=0.27, hspace=0.25, left=0.12) # 2x2
    fig.subplots_adjust(wspace=0.38, hspace=0.25, left=0.074, bottom=0.22)
    si = 4
    mark = 'o'
    cmap = cm.jet
    bins = [80,80]
    ax = fig.add_subplot(141)
    #plt.scatter(stats.mag, stats.continuum_sn*bin_sn, alpha=0.5, c=mcol)
    use = np.isfinite(binned) & (binned > 0)
    #plt.scatter(stats.mag[use], (binned/ysize/ysky)[use], alpha=0.5, c=mcol[use], s=si, marker=mark)
    unicorn.intersim.show_hist_contour(stats.mag[use], (binned/ysize/ysky)[use], axrange=[[20,24],[0.5,100]], ylog=True, cmap=cmap, bins=bins)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.mag[use], (binned/ysize/ysky)[use], NBIN=80)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.ylim(0.5,100)
    plt.plot([20,24],[5,5], color='black', alpha=0.4)
    plt.xlim(20,24)
    plt.semilogy()
    if use_tex:
        plt.xlabel(r'MAG\_AUTO $m_{140}$')
    else:
        plt.xlabel(r'MAG_AUTO $m_{140}$')
    plt.ylabel('continuum S/N')
    ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(6, integer=True))
    ax.xaxis.set_minor_locator(MultipleLocator(0.5))
    ax.set_yticks([1,10,100]); ax.set_yticklabels(['1','10','100'])
    sn5_limit = np.interp(5,ym[::-1],xm[::-1])
    print 'Continuum, S/N=5 @ %.3f' %(sn5_limit)
    print threedhst.utils.biweight(stats.r50[sub], both=True)
    ax = fig.add_subplot(142)
    #plt.scatter(stats.r50[sub], (binned/ymag/ysky)[sub], c=mcol[sub], alpha=0.5, s=si)
    unicorn.intersim.show_hist_contour(stats.r50[sub]*0.06, (binned/ymag/ysky)[sub], axrange=[[0,20*0.06],[0.3,1.7]], bins=bins, cmap=cmap)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub]*0.06, (binned/ymag/ysky)[sub], NBIN=20)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.fill_betweenx([0,10],[1.7*0.06,1.7*0.06],[2.5*0.06,2.5*0.06], alpha=0.15, color='black')
    #plt.xlabel(r'$R_{50}$ [$0.\!\!^{\prime\prime}06$ pix]')
    plt.xlabel(r'$R_{50}$ [arcsec]')
    plt.ylabel(r'$\delta$ cont. S/N')
    plt.ylim(0.3,1.7)
    #plt.ylim(0.3,2.5)
    plt.xlim(0,15*0.06)
    majorLocator = MultipleLocator(0.2)
    minorLocator = MultipleLocator(0.1)
    ax.xaxis.set_major_locator(majorLocator)
    ax.xaxis.set_minor_locator(minorLocator)
    # x0 = np.interp(1,ym[::-1],xm[::-1])
    # plt.plot(xm,(x0/xm), color='red')
    # plt.plot(xm,(x0/xm)**0.5, color='red')
    x0 = np.interp(1,ym[::-1],xm[::-1])
    plt.plot(xm,(x0/xm)**(0.5), color='white', alpha=0.5, linewidth=2)
    plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.8)
    # update the size correction with the arcsec-scaled running median
    ysize = np.interp(stats.r50*0.06, xm, ym)
    # plt.scatter(stats.r50[sub], (binned/ymag/ysize)[sub], c=sky_col[sub], alpha=0.5)
    # xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub], (binned/ymag/ysize)[sub], NBIN=10)
    # plt.plot(xm, ym, linewidth=2, color='black', alpha=0.5)
    ax = fig.add_subplot(143)
    #plt.scatter(stats.sky_avg[sub], (binned/ymag/ysize)[sub], c=mcol[sub], alpha=0.5, s=si)
    unicorn.intersim.show_hist_contour(stats.sky_avg[sub], (binned/ymag/ysize)[sub], axrange=[[0.5,3.5],[0.3,1.7]], bins=bins, cmap=cmap)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (binned/ymag/ysize)[sub], NBIN=25)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.ylim(0.3,1.7)
    plt.xlim(0.5,3.5)
    plt.xlabel(r'Background [e$^-$ / s]')
    plt.ylabel(r'$\delta$ cont S/N')
    ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(6, integer=True))
    x0 = np.interp(1,ym[::-1],xm[::-1])
    plt.plot(xm,(x0/xm)**(0.5), color='white', alpha=0.5, linewidth=2)
    plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.7)
    ysky = np.interp(stats.sky_avg, xm, ym)
    ### Very little residual trend with concentration
    ax = fig.add_subplot(144)
    #plt.scatter(concentration[sub], (binned/ymag/ysize/ysky)[sub], c=mcol[sub], s=si, alpha=0.5)
    unicorn.intersim.show_hist_contour(concentration[sub], (binned/ymag/ysize/ysky)[sub], axrange=[[0.25,0.60],[0.3,1.7]], bins=bins, cmap=cmap)
    xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (binned/ymag/ysize/ysky)[sub], NBIN=25)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.xlim(0.25,0.60)
    plt.ylim(0.3,1.7)
    #plt.ylim(0.5,1.5)
    plt.xlabel(r'$C = R_{50}/R_{90}$')
    plt.ylabel(r'$\delta$ cont S/N')
    #ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(5, prune=None))
    ax.xaxis.set_major_locator(MultipleLocator(0.1))
    ycons = np.interp(concentration, xm, ym)
    plt.savefig('grism_cont_sensitivity.pdf')
    # #### Test
    # plt.scatter(stats.mag, binned, alpha=0.5, c=sky_col, s=4)
    # xm, ym, ys, nn = threedhst.utils.runmed(stats.mag, binned, NBIN=80)
    # plt.errorbar(xm, ym, ys, linewidth=2, color='black', alpha=0.5)
    # plt.ylim(0.1,2000)
    # plt.plot([17,24],[5,5], color='black', alpha=0.4)
    # plt.xlim(17,24)
    # plt.semilogy()
    #### Line fluxes: same correction-function exercise for the Ha line S/N
    ha_sn = stats.ha_flux/stats.ha_flux_err
    show = np.isfinite(ha_sn) & (ha_sn > 0) & (stats.ha_flux > 0)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.ha_flux[~is_star & show], ha_sn[~is_star & show], NBIN=25)
    yline_flux = np.interp(stats.ha_flux, xm, ym)
    #sub = (stats.ha_flux > 6) & (stats.ha_flux < 100) & (stats.mag > 18) & (np.isfinite(ha_sn)) # & (~is_star)
    #sub = (stats.mag > 19) & (stats.mag < 22.5) & (stats.continuum_sn > 0) & (stats.ha_flux > 0) #& (~is_star)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub], (ha_sn/yline_flux)[sub], NBIN=30)
    yline_r50 = np.interp(stats.r50, xm, ym)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50)[sub], NBIN=20)
    yline_sky = np.interp(stats.sky_avg, xm, ym)
    xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], NBIN=10)
    yline_con = np.interp(concentration, xm, ym)
    plt.errorbar(ha_model, stats.ha_flux, stats.ha_flux_err, marker='o', markersize=0.1, linestyle='None', color='0.5')
    plt.scatter(ha_model, stats.ha_flux, c=mcol, zorder=100, alpha=0.5)
    #plt.scatter(stats.s2_flux, s2_model, alpha=0.8, c=mc)
    plt.plot([0.1,1000],[0.1,1000], color='black', alpha=0.5)
    plt.xlim(0.5,1000)
    plt.ylim(0.5,1000)
    plt.loglog()
    # 2x2
    #fig = unicorn.catalogs.plot_init(xs=5.5, aspect=1, left=0.08)
    #fig.subplots_adjust(wspace=0.27, hspace=0.25, left=0.12)
    #### Four-panel line sensitivity figure
    fig = unicorn.catalogs.plot_init(xs=8, aspect=1./4, left=0.07, use_tex=use_tex)
    fig.subplots_adjust(wspace=0.38, hspace=0.25, left=0.074, bottom=0.22)
    ax = fig.add_subplot(141)
    si = 4
    show = np.isfinite(ha_sn) & (ha_sn > 0) & (stats.ha_flux > 0)
    #plt.scatter(stats.ha_flux[show], ha_sn[show], c=mcol[show], s=si, zorder=100, alpha=0.3)
    unicorn.intersim.show_hist_contour(stats.ha_flux[show], (ha_sn/yline_r50/yline_sky/yline_con)[show], axrange=[[0.5,100],[0.5,100]], bins=bins, cmap=cmap, xlog=True, ylog=True)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.ha_flux[~is_star & show], (ha_sn/yline_r50/yline_sky/yline_con)[~is_star & show], NBIN=25)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.plot([0.5,100],[5,5], color='black', alpha=0.4)
    plt.xlim(0.5,100)
    plt.ylim(0.5,100)
    plt.loglog()
    plt.xlabel(r'line flux [$10^{-17}$ ergs / s / cm$^2$]')
    plt.ylabel('line S/N')
    ax.set_yticks([1,10,100]); ax.set_yticklabels(['1','10','100'])
    ax.set_xticks([1,10,100]); ax.set_xticklabels(['1','10','100'])
    sn5_limit = np.interp(5,ym,xm)
    print 'Line, S/N=5 @ %.3e' %(sn5_limit)
    print threedhst.utils.biweight(stats.r50[sub], both=True)
    yline_flux = np.interp(stats.ha_flux, xm, ym)
    #plt.scatter(stats.ha_flux, ha_sn/yline_flux, c=mcol, alpha=0.2)
    #### Nice: line flux with respect to concentration after taking out the overall trend with
    #### line strength
    ax = fig.add_subplot(142)
    #plt.scatter(stats.r50[sub], (ha_sn/yline_flux)[sub], c=mcol[sub], s=si, alpha=0.3)
    unicorn.intersim.show_hist_contour(stats.r50[sub]*0.06, (ha_sn/yline_flux/yline_sky/yline_con)[sub], axrange=[[0,15*0.06],[0.3,2.5]], bins=bins, cmap=cmap)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.r50[sub]*0.06, (ha_sn/yline_flux/yline_sky/yline_con)[sub], NBIN=30)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20*0.06],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.fill_betweenx([0,10],[1.7*0.06,1.7*0.06],[2.5*0.06,2.5*0.06], alpha=0.15, color='black')
    plt.ylim(0.3,2.5)
    plt.xlim(0,15*0.06)
    #plt.xlabel(r'$R_{50}$ [$0.\!\!^{\prime\prime}06$ pix]')
    plt.ylabel(r'$\delta$ line S/N')
    #plt.semilogy()
    # x0 = np.interp(1,ym[::-1],xm[::-1])
    # plt.plot(xm,(x0/xm), color='red')
    # plt.plot(xm,(x0/xm)**0.5, color='red')
    plt.xlabel(r'$R_{50}$ [arcsec]')
    ax.xaxis.set_major_locator(MultipleLocator(0.2))
    ax.xaxis.set_minor_locator(MultipleLocator(0.1))
    x0 = np.interp(1,ym[::-1],xm[::-1])
    plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.7)
    yline_r50 = np.interp(stats.r50*0.06, xm, ym)
    ax = fig.add_subplot(143)
    #plt.scatter(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50)[sub], c=mcol[sub], s=si, alpha=0.3)
    unicorn.intersim.show_hist_contour(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50/yline_con)[sub], axrange=[[0.5,3.5],[0.3,1.7]], bins=bins, cmap=cmap)
    xm, ym, ys, nn = threedhst.utils.runmed(stats.sky_avg[sub], (ha_sn/yline_flux/yline_r50/yline_con)[sub], NBIN=20)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.ylim(0.3,1.7)
    plt.xlim(0.5,3.5)
    plt.xlabel(r'Background [e$^-$ / s]')
    plt.ylabel(r'$\delta$ line S/N')
    ax.xaxis.set_major_locator(unicorn.analysis.MyLocator(6, integer=True))
    yline_sky = np.interp(stats.sky_avg, xm, ym)
    x0 = np.interp(1,ym[::-1],xm[::-1])
    plt.plot(xm,(x0/xm)**(0.5), color='red', alpha=0.7)
    ax = fig.add_subplot(144)
    #plt.scatter(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], c=mcol[sub], s=si, alpha=0.3)
    unicorn.intersim.show_hist_contour(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], axrange=[[0.25,0.60],[0.3,1.7]], bins=bins, cmap=cmap)
    xm, ym, ys, nn = threedhst.utils.runmed(concentration[sub], (ha_sn/yline_flux/yline_r50/yline_sky)[sub], NBIN=10)
    plt.plot(xm, ym, linewidth=2, color='white', alpha=0.5, zorder=100)
    plt.plot(xm, ym, linewidth=1, color='black', alpha=0.8, zorder=100)
    plt.plot([0,20],[1,1], linewidth=1, alpha=0.4, zorder=101, color='black')
    plt.xlim(0.25,0.60)
    plt.ylim(0.3,1.7)
    plt.xlabel(r'$C = R_{50}/R_{90}$')
    plt.ylabel(r'$\delta$ line S/N')
    ax.xaxis.set_major_locator(MultipleLocator(0.1))
    yline_con = np.interp(concentration, xm, ym)
    plt.savefig('grism_line_sensitivity.pdf')
    # #### Test:
    # show = (np.isfinite(ha_sn)) & (stats.ha_flux > 0)
    # plt.scatter(stats.ha_flux[show], (ha_sn/yline_sky)[show], c=mcol[show], zorder=100, alpha=0.2)
    # xm, ym, ys, nn = threedhst.utils.runmed(stats.ha_flux[show], (ha_sn/yline_sky)[show], NBIN=25)
    # plt.plot(xm, ym, linewidth=2, color='black', alpha=0.5, zorder=100)
    # plt.plot([0.5,1000],[5,5], color='black', alpha=0.4)
    # plt.xlim(0.5,1000)
    # plt.ylim(0.5,300)
    # plt.loglog()
    #plt.semilogy()
    #
    plt.scatter(stats.mag, stats.ha_flux, c=mcol, zorder=100, alpha=0.5)
    plt.ylim(0.1,5000)
    plt.semilogy()
    #### EQW: distribution of recovered-minus-input equivalent widths
    dha = stats.ha_eqw-130.
    hy, hx, hh = plt.hist(dha/stats.ha_eq_err, range=(-5,5), bins=50, alpha=0.7)
    threedhst.utils.biweight(dha/stats.ha_eq_err, both=True)
    #### redshift recovery (input z0 = 1)
    dz = (stats.z_fit-1)/2.
    plt.scatter(stats.mag, dz, c=mcol, alpha=0.5)
    plt.scatter(stats.ha_flux, dz, c=mcol, alpha=0.5)
    plt.xlim(0.1,5000)
    plt.semilogx()
    #### surface density
    mu = stats.mag-2*np.log(stats.r90*0.06)
    plt.scatter(stats.mag, mu, c=mcol)
def show_hist_contour(xin, yin, axrange=None, bins=(50, 50), xlog=False, ylog=False, ax=None, Vbins=(2, 4, 8, 16, 32, 64, 128, 256, 512, 4096), cmap=cm.jet, fill=True, *args, **kwargs):
    """
    Plot a 2D histogram of the points (xin, yin) as (filled) contours.

    Parameters
    ----------
    xin, yin : array-like point coordinates.
    axrange : [[xmin, xmax], [ymin, ymax]] histogram range in the input
        (linear) units, or None to use the data limits.
    bins : number of histogram bins along each axis.
    xlog, ylog : if True, histogram log10 of the corresponding coordinate.
    ax : matplotlib axes to draw into; None draws into the current pyplot axes.
    Vbins : contour levels, in counts per bin.
    cmap : matplotlib colormap.
    fill : filled contours (contourf) if True, else line contours.

    Remaining *args / **kwargs are forwarded to contour/contourf.
    """
    import matplotlib.colors as co
    # Histogram in log space when requested.
    xdata = np.log10(xin) if xlog else xin
    ydata = np.log10(yin) if ylog else yin
    if axrange is None:
        # Limits computed from xdata/ydata are already in log space when
        # xlog/ylog is set, so no further transform is needed here.  (The
        # previous version applied log10 a second time in this branch.)
        axrange = [[np.min(xdata), np.max(xdata)],
                   [np.min(ydata), np.max(ydata)]]
    else:
        # Work on a copy so the caller's axrange list is not mutated in
        # place by the log transform below.
        axrange = [list(axrange[0]), list(axrange[1])]
        if xlog:
            axrange[0] = [np.log10(v) for v in axrange[0]]
        if ylog:
            axrange[1] = [np.log10(v) for v in axrange[1]]
    hist, xedge, yedge = np.histogram2d(xdata, ydata, bins=bins, range=axrange)
    # Bin centers, mapped back to linear units on log axes.
    xx = (xedge[:-1]+xedge[1:])/2.
    yy = (yedge[:-1]+yedge[1:])/2.
    if xlog:
        xx = 10**xx
    if ylog:
        yy = 10**yy
    norml = co.BoundaryNorm(Vbins, 312)
    target = plt if ax is None else ax
    draw = target.contourf if fill else target.contour
    # NOTE(review): `linethick` is not a documented contour kwarg; kept only
    # for parity with the original call.
    draw(xx, yy, hist.transpose(), Vbins, linethick=2, norm=norml, cmap=cmap, *args, **kwargs)
def get_line_fluxes(z0=1.0, mag=21):
    """
    Get emission line fluxes for a given continuum magnitude.

    Parameters
    ----------
    z0 : redshift applied to the rest-frame template wavelengths.
    mag : AB continuum magnitude used to normalize the template through the
        F140W passband.

    Returns
    -------
    (ha_flux, s2_flux) : Ha and [SII] line fluxes, in units of
        1e-17 erg/s/cm2 (see the `flam` normalization below).
    """
    # Star-forming template line spectrum (Dobos et al.), redshifted to z0.
    # (Removed a stray Python-2-only debug statement, `print z0`.)
    xflux, yline = np.loadtxt(unicorn.GRISM_HOME+'/templates/dobos11/SF0_0.emline.txt', unpack=True)
    xflux *= (1+z0)
    #### Add continuum, here with level 0.1*max(line)
    ycont = yline.max()*0.1
    yflux = ycont+yline
    #### Normalize to F140W passband
    x_filt, y_filt = np.loadtxt(os.getenv('iref')+'/F140W.dat', unpack=True)
    y_filt_int = utils_c.interp_c(xflux, x_filt, y_filt)
    filt_norm = np.trapz(y_filt_int*yflux, xflux) / np.trapz(y_filt_int, xflux)
    yflux /= filt_norm
    yline /= filt_norm
    ycont /= filt_norm
    # AB mag -> f_nu -> f_lambda at the redshifted Ha wavelength, in 1e-17 cgs.
    fnu = 10**(-0.4*(mag+48.6))
    flam = fnu*3.e18/(6564.*(1+z0))**2/1.e-17
    # Integrate the line-only spectrum within +/-100 A of each line center.
    ha = np.abs(xflux-6564*(1+z0)) < 100
    ha_flux = np.trapz(yline[ha], xflux[ha])
    s2 = np.abs(xflux-6731*(1+z0)) < 100
    s2_flux = np.trapz(yline[s2], xflux[s2])
    return ha_flux*flam, s2_flux*flam
# #### Trying to figure out units
# plt.plot(gris.twod.im['WAVE'].data, gris.twod.im['SENS'].data)
# plt.plot(unicorn.reduce.sens_files['A'].field('WAVELENGTH'), unicorn.reduce.sens_files['A'].field('SENSITIVITY')*1.e-17*np.median(np.diff(gris.twod.im['WAVE'].data))/2**2)
#
# # test, FLT errors
# flt = pyfits.open('ibhm47gwq_flt.fits')
# err_flt = np.random.normal(size=flt[1].data.shape)*flt[2].data
# mask_flt = (flt[1].data < 0.1) & (err_flt != 0)
# threedhst.utils.biweight(flt[1].data[mask_flt].flatten())
# threedhst.utils.biweight(err_flt[mask_flt].flatten())
| |
import ast
import json
import mock
import unittest
# these are for mocking out the requests lib
import responses
import requests
from pyramid.testing import setUp, tearDown
from swiftclient.exceptions import ClientException
from .. catalog import *
class CatalogTests(unittest.TestCase):
    """Unit tests for the localapi.catalog module (URL builders, package
    listing/filtering, and install)."""
    def setUp(self):
        # Pyramid testing configuration; paired with tearDown() below.
        self.config = setUp()
    def tearDown(self):
        tearDown()
    # ========================================
    # = build_global_catalog_url
    # ========================================
    @mock.patch('localapi.catalog.settings')
    def test_build_global_catalog_url(self, settings_mock):
        """The catalog URL is the configured base followed by /v1/catalog."""
        expected_url = "http://localhost/v1/catalog"
        settings_mock.return_value = "http://localhost"
        result = build_global_catalog_url()
        self.assertEqual(result, expected_url)
    # ========================================
    # = build_global_catalog_item_url
    # ========================================
    @mock.patch('localapi.catalog.settings')
    def test_build_global_catalog_item_url(self, settings_mock):
        """The item URL is the configured base followed by /v1/catalog_item."""
        expected_url = "http://localhost/v1/catalog_item"
        settings_mock.return_value = "http://localhost"
        result = build_global_catalog_item_url()
        self.assertEqual(result, expected_url)
    # ========================================
    # = get_available_package
    # ========================================
    @mock.patch('requests.get')
    @mock.patch('localapi.catalog.build_global_catalog_item_url')
    def test_get_available_package(self, mock_catalog_item_url, mock_requests):
        """A 200 response yields the raw response content."""
        # Plain dict stands in for a requests.Response-like object.
        fake_response = {'status_code': 200, '_content': 'dummy_content'}
        mock_requests.return_value = fake_response
        result = get_available_package('dummy')
        self.assertEqual(result, 'dummy_content')
    @mock.patch('requests.get')
    @mock.patch('localapi.catalog.build_global_catalog_item_url')
    def test_get_available_package_missing(
            self, mock_catalog_item_url, mock_requests):
        """A non-200 response yields an empty dict."""
        fake_response = {'status_code': 500}
        mock_requests.return_value = fake_response
        result = get_available_package('dummy')
        self.assertEqual(result, {})
    @mock.patch('localapi.catalog.build_global_catalog_item_url')
    def test_get_available_package_exeception(
            self, mock_catalog_item_url):
        """An exception building the URL must not propagate to the caller."""
        # NOTE(review): method name has a typo ('exeception').  Also,
        # assertRaises is misused below: error_message is a string, not a
        # callable, so the call does not verify what it appears to.  The
        # effective assertion is that get_available_package() did not raise.
        error_message = 'ERROR'
        mock_catalog_item_url.side_effect = Exception(error_message)
        get_available_package('dummy')
        self.assertRaises(Exception, error_message)
    # ========================================
    # = get_available_package_manifests
    # ========================================
    # @unittest.skip("skipping for now")
    @responses.activate
    @mock.patch('localapi.catalog.build_global_catalog_url')
    def test_get_available_package_manifests(self, mock_catalog_url):
        """A 200 catalog response passes status and packages straight through."""
        fake_global_api_url = 'http://example.com/v1/catalog'
        mock_catalog_url.return_value = fake_global_api_url
        fake_resp = {
            "status": 200,
            "packages": [{
                "status": "available",
                "name": "Node Env",
                "author": "foo.bar@corp.com",
                "package": "node-env.tar.gz",
                "version": "0.0",
                "tags": ["node-env", "nodejs", "foo-bar", "ALS", "helion"],
                "icon": "nodejs.png"}]}
        responses.add(
            responses.GET,
            fake_global_api_url,
            status=200,
            body=json.dumps(fake_resp),
            content_type='application/json')
        response = get_available_package_manifests()
        self.assertEqual(response['status'], 200)
        self.assertEqual(response['packages'], fake_resp['packages'])
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(responses.calls[0].request.url, fake_global_api_url)
        self.assertEqual(ast.literal_eval(
            responses.calls[0].response.text), fake_resp)
    @responses.activate
    @mock.patch('localapi.catalog.build_global_catalog_url')
    def test_get_available_package_manifests_returns_404(
            self, mock_catalog_url):
        """A 404 from the global catalog yields status 404 and no packages."""
        fake_global_api_url = 'http://example.com/v1/catalog'
        mock_catalog_url.return_value = fake_global_api_url
        responses.add(responses.GET, fake_global_api_url,
                      status=404, content_type='application/json')
        resp = get_available_package_manifests()
        self.assertEqual(resp['status'], 404)
        # No need to call .json() on the response; response was sent as json
        self.assertEqual(resp['packages'], [])
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(responses.calls[0].request.url, fake_global_api_url)
        self.assertEqual(responses.calls[0].response.text, u'')
    @responses.activate
    @mock.patch('localapi.catalog.build_global_catalog_url')
    def test_get_available_package_manifests_returns_unknown_error(
            self, mock_catalog_url):
        """Any other error status (here 401) also yields an empty package list."""
        fake_global_api_url = 'http://example.com/v1/catalog'
        mock_catalog_url.return_value = fake_global_api_url
        responses.add(responses.GET, fake_global_api_url,
                      status=401, content_type='application/json')
        resp = get_available_package_manifests()
        self.assertEqual(resp['status'], 401)
        # No need to call .json() on the response; response was sent as json
        self.assertEqual(resp['packages'], [])
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(responses.calls[0].request.url, fake_global_api_url)
        self.assertEqual(responses.calls[0].response.text, u'')
    # ========================================
    # = get_package_manifests
    # ========================================
    @mock.patch('localapi.catalog.apply_status_to_package_list')
    @mock.patch('localapi.catalog.filter_manifests')
    @mock.patch('localapi.catalog.connection')
    @mock.patch('localapi.catalog.get_auth_token')
    @mock.patch('localapi.catalog.settings')
    def test_get_package_manifests(
            self, mock_settings, mock_get_auth_token, mock_connection,
            mock_filter, mock_apply_status):
        """The container listing is filtered to JSON manifests and marked
        installed; the result carries status 200."""
        filelist1 = [
            {'content_type': 'application/json', 'content': 'foo'},
            {'content_type': 'application/json', 'content': 'bar'},
            {'content_type': 'application/xml', 'content': 'baz'}
        ]
        filelist2 = [
            {'content_type': 'application/json', 'content': 'foo'},
            {'content_type': 'application/json', 'content': 'bar'}
        ]
        filelist3 = [{
            'content_type': 'application/json',
            'content': 'foo',
            'status': 'installed'
        }, {
            'content_type': 'application/json',
            'content': 'bar',
            'status': 'installed'}
        ]
        mock_headers = mock.Mock()
        # swift get_container returns (headers, object_list).
        mock_connection.get_container.return_value = [mock_headers, filelist1]
        mock_filter.return_value = filelist2
        mock_apply_status.return_value = filelist3
        result = get_package_manifests()
        self.assertEqual(result['status'], 200)
        self.assertEqual(result['packages'], filelist3)
    @mock.patch('localapi.catalog.settings')
    @mock.patch('localapi.catalog.get_auth_token')
    @mock.patch('localapi.catalog.connection')
    def test_get_package_manifests_404(
            self, mock_connection, mock_get_auth_token, mock_settings):
        """A swift ClientException with http_status 404 maps to status 404."""
        error_message = 'CONNECTION ERROR'
        mock_connection.side_effect = ClientException(http_status=404,
                                                      msg=error_message)
        result = get_package_manifests()
        self.assertEqual(result['status'], 404)
    # ToDo: remove this once we remove the below ClientException in catalog.py
    @mock.patch('localapi.catalog.settings')
    @mock.patch('localapi.catalog.get_auth_token')
    def test_get_package_manifests_client_exception(
            self, mock_get_auth_token, mock_settings):
        """A ClientException without an http_status maps to a generic 500."""
        error_message = 'ERROR'
        mock_get_auth_token.side_effect = ClientException(error_message)
        result = get_package_manifests()
        self.assertEqual(result['status'], 500)
    @mock.patch('localapi.catalog.settings')
    def test_get_package_manifests_exception(
            self, mock_settings):
        """Any other exception also maps to status 500."""
        error_message = 'ERROR'
        mock_settings.side_effect = Exception(error_message)
        result = get_package_manifests()
        self.assertEqual(result['status'], 500)
    # ========================================
    # = apply_status_to_package_list
    # ========================================
    def test_apply_status_to_package_list(self):
        """Every package dict gains a 'status' key with the given value."""
        src_pkgs = [{'name': 'pkg1'}, {'name': 'pkg2'}]
        status = 'dummy'
        pkg_list = [{'name': 'pkg1', 'status': 'dummy'},
                    {'name': 'pkg2', 'status': 'dummy'}]
        result = apply_status_to_package_list(src_pkgs, status)
        self.assertEqual(result, pkg_list)
    # ========================================
    # = filter_manifests
    # ========================================
    def test_filter_manifests(self):
        """Non-JSON entries (e.g. deployment_* xml blobs) are dropped."""
        all_data = [
            {'content': 'foo', 'content_type': 'application/json', 'name': 'a'},
            {'content': 'bar', 'content_type': 'application/json', 'name': 'b'},
            {
                'name': 'deployment_',
                'content': 'baz',
                'content_type': 'application/xml'
            }]
        # NOTE(review): bad_data is defined but never used in this test.
        bad_data = [{
            'name': 'deployment_',
            'content': 'baz',
            'content_type': 'application/xml'
        }]
        res_good = [
            {'content': 'foo', 'content_type': 'application/json', 'name': 'a'},
            {'content': 'bar', 'content_type': 'application/json', 'name': 'b'},
        ]
        result = filter_manifests(all_data)
        self.assertEqual(result, res_good)
    def test_filter_manifests_when_none(self):
        """An empty listing filters to an empty list."""
        result = filter_manifests([])
        self.assertEqual(result, [])
    # ========================================
    # = get_package_list
    # ========================================
    @mock.patch('localapi.catalog.apply_status_to_package_list')
    @mock.patch('localapi.catalog.get_available_package_manifests')
    @mock.patch('localapi.catalog.get_package_manifests')
    def test_get_package_list(
            self, mock_get_manifests, mock_get_avail_manifests,
            mock_apply_status):
        """Installed and available manifests are merged into one list."""
        pkg1 = [{'name1': 'pkg1', 'status': 'installed'}]
        pkg2 = [{'name2': 'pkg2', 'status': 'available'}]
        pkgs = [{'name1': 'pkg1', 'status': 'installed'},
                {'name2': 'pkg2', 'status': 'available'}]
        mock_ins_mnfts = {'status': 200, 'packages': pkg1}
        mock_avl_mnfts = {'status': 200, 'packages': pkg2}
        mock_all_mnfts = {'status': 200, 'packages': pkgs}
        mock_get_manifests.return_value = mock_ins_mnfts
        mock_get_avail_manifests.return_value = mock_avl_mnfts
        mock_apply_status.return_value = mock_all_mnfts
        result = get_package_list()
        # ToDo - beef up the comparison here
        # self.assertEqual(result['status'], mock_manifests['status'])
        # self.assertEqual(result, None)
        self.assertEqual(len(result['packages']),
                         len(mock_all_mnfts['packages']))
    @mock.patch('localapi.catalog.get_package_manifests')
    @mock.patch('localapi.catalog.get_available_package_manifests')
    def test_get_package_list_when_no_packages(self, mock_avail_get_manifests,
                                               mock_inst_get_manifests):
        """Neither source has packages: status 404 and an empty list."""
        result_not_avail = {'status': 404, 'packages': []}
        mock_avail_get_manifests.return_value = result_not_avail
        mock_inst_get_manifests.return_value = result_not_avail
        result = get_package_list()
        self.assertEqual(result['status'], 404)
        self.assertEqual(result['packages'], [])
    @mock.patch('localapi.catalog.get_package_manifests')
    @mock.patch('localapi.catalog.get_available_package_manifests')
    def test_get_package_list_when_no_available_pkgs(
            self, mock_avail_get_manifests, mock_inst_get_manifests):
        """Only installed packages present: returned with status 200."""
        result_not_avail = {'status': 404, 'packages': []}
        installed_pkgs = {'status': 200,
                          'packages': [{
                              'name1': 'pkg1',
                              'status': 'installed'}]}
        mock_avail_get_manifests.return_value = result_not_avail
        mock_inst_get_manifests.return_value = installed_pkgs
        response = get_package_list()
        self.assertEqual(response['status'], 200)
        self.assertEqual(response['packages'],
                         installed_pkgs['packages'])
    @mock.patch('localapi.catalog.get_package_manifests')
    @mock.patch('localapi.catalog.get_available_package_manifests')
    def test_get_package_list_when_no_installed_pkgs(
            self, mock_avail_get_manifests, mock_inst_get_manifests):
        """Only available packages present: returned with status 200."""
        result_not_installed = {'status': 404, 'packages': []}
        available_pkgs = {'status': 200,
                          'packages': [{
                              'name1': 'pkg1',
                              'status': 'available'}]}
        mock_inst_get_manifests.return_value = result_not_installed
        mock_avail_get_manifests.return_value = available_pkgs
        response = get_package_list()
        self.assertEqual(response['status'], 200)
        self.assertEqual(response['packages'],
                         available_pkgs['packages'])
    # @patch('localapi.catalog.get_package_manifests')
    # def test_get_package_list_clientException(self, mock_get_manifests):
    #     error_message = 'get_package_manifests ClientException'
    #     mock_get_manifests.side_effect = ClientException(error)
    #
    #     result = get_package_list()
    #     self.assertRaises(ClientException, mock_get_manifests, error_message)
    @mock.patch('localapi.catalog.get_available_package_manifests')
    def test_get_package_list_exception(self, mock_avail_manifests):
        """An unexpected exception maps to status 500 with no packages."""
        error_message = 'Exception'
        mock_avail_manifests.side_effect = Exception(error_message)
        result = get_package_list()
        self.assertEqual(result['status'], 500)
        self.assertEqual(result['packages'], [])
    # @patch('localapi.catalog.get_package_manifests')
    # def test_get_package_list_clientException(
    #         self, mock_getPackageManifests):
    #     error_message = 'GET MANIFEST ERROR'
    #     mock_getPackageManifests.side_effect = ClientException(error_message)
    #
    #     result = get_package_list()
    #     self.assertRaises(
    #         ClientException, mock_getPackageManifests, error_message)
    #
    # @patch('localapi.catalog.get_package_manifests')
    # def test_get_package_list_exception(self, mock_getPackageManifests):
    #     error_message = 'GET MANIFEST ERROR'
    #     mock_getPackageManifests.side_effect = Exception(error_message)
    #
    #     result = get_package_list()
    #     self.assertRaises(Exception, error_message)
    # ========================================
    # = install_package
    # ========================================
    @mock.patch('localapi.catalog.write_package')
    @mock.patch('localapi.catalog.get_available_package')
    def test_install_package(self, mock_get_avail_pkg, mocked_write):
        """The fetched package file object is handed to write_package."""
        fake_fileobj = 'fake-fileobj'
        mock_get_avail_pkg.return_value = fake_fileobj
        install_package('dummy')
        mocked_write.assert_called_with('dummy', fake_fileobj)
    # ToDo - REMOVE - ClientException no longer handled in catalog module.
    # @mock.patch('localapi.catalog.write_package')
    # @mock.patch('localapi.catalog.get_available_package')
    # def test_install_package_raises_client_exception(
    #         self, mock_get_avail_pkg, mocked_write):
    #     error_message = 'ERROR'
    #     mock_get_avail_pkg.side_effect = ClientException(error_message)
    #
    #     install_package('dummy')
    #     self.assertRaises(
    #         ClientException, mock_get_avail_pkg, error_message)
    @mock.patch('localapi.catalog.write_package')
    @mock.patch('localapi.catalog.get_available_package')
    def test_install_package_raises_exception(
            self, mock_get_avail_pkg, mocked_write):
        """An exception while fetching must not propagate to the caller."""
        # NOTE(review): as above, assertRaises is misused (error_message is
        # not callable); the effective assertion is that install_package()
        # did not raise.
        error_message = 'ERROR'
        mock_get_avail_pkg.side_effect = Exception(error_message)
        install_package('dummy')
        self.assertRaises(Exception, error_message)
| |
import os
import tornado.httpserver
import tornado.ioloop
import tornado.web
from creds import *
from requests import Request
import requests
import json
import re
import tempfile
import redis
import uuid
from pydub import AudioSegment
import random
import string
def gettoken(uid):
    """Return a valid AVS access token for *uid*, or False if unauthenticated.

    Looks up a cached access token in redis first; if it has expired but a
    refresh token is present, exchanges the refresh token for a new access
    token and caches it for one hour (matching Amazon's token lifetime).
    """
    red = redis.from_url(redis_url)
    token = red.get(uid+"-access_token")
    refresh = red.get(uid+"-refresh_token")
    if token:
        return token
    elif refresh:
        payload = {"client_id" : Client_ID, "client_secret" : Client_Secret, "refresh_token" : refresh, "grant_type" : "refresh_token", }
        url = "https://api.amazon.com/auth/o2/token"
        r = requests.post(url, data = payload)
        resp = json.loads(r.text)
        # A failed refresh (revoked token, bad credentials) returns an error
        # document with no 'access_token'; treat that as "not authenticated"
        # instead of raising KeyError.
        if 'access_token' not in resp:
            return False
        red.set(uid+"-access_token", resp['access_token'])
        red.expire(uid+"-access_token", 3600)
        return resp['access_token']
    else:
        return False
class BaseHandler(tornado.web.RequestHandler):
    """Base handler: identifies the user via the 'user' session cookie."""

    def get_current_user(self):
        """Return the session id from the 'user' cookie (None if absent)."""
        user_cookie = self.get_cookie("user")
        return user_cookie
class MainHandler(BaseHandler):
    """Serves the single-page app (static/index.html) to authenticated users."""

    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        # Context manager guarantees the file handle is closed even if the
        # read fails (the previous version leaked it on exception).
        with open('static/index.html', 'r') as f:
            resp = f.read()
        self.write(resp)
        self.finish()
class StartAuthHandler(tornado.web.RequestHandler):
    """Starts Login-with-Amazon OAuth2 by redirecting to the consent page."""

    @tornado.web.asynchronous
    def get(self):
        # Scope data required by AVS: identifies the product and a device
        # serial number.  (Removed the unused local `scope = "alexa_all"`,
        # whose value also disagreed with the real scope "alexa:all".)
        sd = json.dumps({
            "alexa:all": {
                "productID": Product_ID,
                "productInstanceAttributes": {
                    "deviceSerialNumber": "1"
                }
            }
        })
        url = "https://www.amazon.com/ap/oa"
        path = self.request.protocol + "://" + self.request.host
        callback = path + "/code"
        payload = {"client_id" : Client_ID, "scope" : "alexa:all", "scope_data" : sd, "response_type" : "code", "redirect_uri" : callback }
        # Prepare (but do not send) the request purely to build the encoded
        # authorization URL, then redirect the browser to it.
        req = Request('GET', url, params=payload)
        p = req.prepare()
        self.redirect(p.url)
class CodeAuthHandler(tornado.web.RequestHandler):
    """OAuth2 redirect target: exchanges the auth code for access/refresh
    tokens, stores them in redis under a fresh session id, and sets the
    session cookie."""

    @tornado.web.asynchronous
    def get(self):
        auth_code = self.get_argument("code")
        base = self.request.protocol + "://" + self.request.host
        redirect_uri = base + "/code"
        token_url = "https://api.amazon.com/auth/o2/token"
        form = {"client_id" : Client_ID, "client_secret" : Client_Secret, "code" : auth_code, "grant_type" : "authorization_code", "redirect_uri" : redirect_uri }
        token_resp = requests.post(token_url, data = form)
        tokens = json.loads(token_resp.text)
        # New random session id; the browser only ever sees this, never the
        # Amazon tokens themselves.
        session_id = str(uuid.uuid4())
        store = redis.from_url(redis_url)
        store.set(session_id+"-access_token", tokens['access_token'])
        store.expire(session_id+"-access_token", 3600)
        store.set(session_id+"-refresh_token", tokens['refresh_token'])
        self.set_cookie("user", session_id)
        self.redirect("/")
class LogoutHandler(BaseHandler):
    """Ends the session: removes both tokens from redis, clears the cookie."""

    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        session_id = tornado.escape.xhtml_escape(self.current_user)
        store = redis.from_url(redis_url)
        for suffix in ("-access_token", "-refresh_token"):
            store.delete(session_id + suffix)
        self.clear_cookie("user")
        self.set_header('Content-Type', 'text/plain')
        self.write("Logged Out, Goodbye")
        self.finish()
class AudioHandler(BaseHandler):
    """Accepts a recorded WAV upload, forwards it to AVS, returns the MP3 reply.

    The uploaded audio is transcoded to the 16 kHz mono format required by
    the Alexa Voice Service speechrecognizer endpoint, posted as a multipart
    request together with a JSON message header, and the MP3 payload found in
    the multipart response is streamed back to the client.
    """
    @tornado.web.authenticated
    @tornado.web.asynchronous
    def post(self):
        uid = tornado.escape.xhtml_escape(self.current_user)
        token = gettoken(uid)
        if (token == False):
            # No valid/refreshable token: client must re-authenticate.
            self.set_status(403)
        else:
            # Raw WAV bytes from the browser upload (form field 'data').
            rxfile = self.request.files['data'][0]['body']
            tf = tempfile.NamedTemporaryFile(suffix=".wav")
            tf.write(rxfile)
            _input = AudioSegment.from_wav(tf.name)
            tf.close()
            # Second temp file receives the transcoded (mono, 16 kHz) audio.
            tf = tempfile.NamedTemporaryFile(suffix=".wav")
            output = _input.set_channels(1).set_frame_rate(16000)
            f = output.export(tf.name, format="wav")
            url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
            headers = {'Authorization' : 'Bearer %s' % token}
            # AVS message metadata: static playback state plus the audio
            # profile describing the attached PCM stream.
            d = {
                "messageHeader": {
                    "deviceContext": [
                        {
                            "name": "playbackState",
                            "namespace": "AudioPlayer",
                            "payload": {
                                "streamId": "",
                                "offsetInMilliseconds": "0",
                                "playerActivity": "IDLE"
                            }
                        }
                    ]
                },
                "messageBody": {
                    "profile": "alexa-close-talk",
                    "locale": "en-us",
                    "format": "audio/L16; rate=16000; channels=1"
                }
            }
            files = [
                ('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),
                ('file', ('audio', tf, 'audio/L16; rate=16000; channels=1'))
            ]
            r = requests.post(url, headers=headers, files=files)
            tf.close()
            # The reply is multipart: pull the boundary token out of the
            # Content-Type header, split the body on it, and keep the large
            # part -- assumed to be the MP3 audio payload.
            for v in r.headers['content-type'].split(";"):
                if re.match('.*boundary.*', v):
                    boundary = v.split("=")[1]
            data = r.content.split(boundary)
            for d in data:
                if (len(d) >= 1024):
                    audio = d.split('\r\n\r\n')[1].rstrip('--')
            self.set_header('Content-Type', 'audio/mpeg')
            self.write(audio)
            self.finish()
# START NEW STUFF
class TriggerHandler(BaseHandler):
    """Reports -- and then clears -- the per-user trigger flag kept in redis."""

    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        store = redis.from_url(redis_url)
        key = self.get_current_user() + "-trigger"
        fired = True if store.get(key) == "True" else False
        self.write({"trigger": fired})
        # Reading the trigger consumes it: reset the flag after reporting.
        store.set(key, False)
        self.finish()
class QuestionHandler(BaseHandler):
    """Stores and retrieves per-user question lists, keyed by e-mail address.

    GET resolves the caller's e-mail from an Amazon access token and returns
    the stored questions; POST stores questions for a given e-mail.
    """
    # @tornado.web.authenticated
    def get_email_from_token(self, token):
        """Resolve an Amazon access token to the account's e-mail address."""
        url = "https://api.amazon.com/user/profile?access_token={}".format(token)
        req = requests.get(url)
        return str(json.loads(req.text)['email'])

    @tornado.web.asynchronous
    def get(self):
        red = redis.from_url(redis_url)
        email = self.get_email_from_token(self.get_argument("access_token"))
        res = red.get(email + "-questions")
        # redis returns None for a missing key; respond with an empty body.
        # (Also removed Python-2-only debug `print` statements and replaced
        # `!= None` with the idiomatic identity check.)
        res = res if res is not None else ""
        self.write(res)
        self.finish()

    @tornado.web.asynchronous
    def post(self):
        red = redis.from_url(redis_url)
        email = self.get_argument("email")
        red.set(email + "-questions", self.get_argument("questions"))
        self.write({"success": True})
        self.finish()
class TextHandler(BaseHandler):
    """Synthesizes speech for a text query and forwards it to AVS.

    The query text is rendered to WAV with espeak, transcoded to the 16 kHz
    mono format AVS expects, posted to the speechrecognizer endpoint, and
    Alexa's MP3 answer is streamed back to the client.
    """

    @tornado.web.authenticated
    @tornado.web.asynchronous
    def get(self):
        import subprocess
        text = self.get_argument("text", None, True)
        uid = tornado.escape.xhtml_escape(self.current_user)
        token = gettoken(uid)
        if (token == False):
            self.set_status(403)
        else:
            random_str = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(20))
            file_name = "/tmp/{}.wav".format(random_str)
            # SECURITY: render the text with an argument list and no shell,
            # so user-supplied text cannot inject shell commands.  The
            # previous os.system('espeak "{}" --stdout > {}') formulation was
            # injectable via the `text` query parameter.
            with open(file_name, 'wb') as wav_out:
                subprocess.call(['espeak', text, '--stdout'], stdout=wav_out)
            _input = AudioSegment.from_wav(file_name)
            tf = tempfile.NamedTemporaryFile(suffix=".wav")
            # AVS requires mono 16 kHz PCM.
            output = _input.set_channels(1).set_frame_rate(16000)
            f = output.export(tf.name, format="wav")
            url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
            headers = {'Authorization' : 'Bearer %s' % token}
            # AVS message metadata: static playback state plus the audio
            # profile describing the attached PCM stream.
            d = {
                "messageHeader": {
                    "deviceContext": [
                        {
                            "name": "playbackState",
                            "namespace": "AudioPlayer",
                            "payload": {
                                "streamId": "",
                                "offsetInMilliseconds": "0",
                                "playerActivity": "IDLE"
                            }
                        }
                    ]
                },
                "messageBody": {
                    "profile": "alexa-close-talk",
                    "locale": "en-us",
                    "format": "audio/L16; rate=16000; channels=1"
                }
            }
            files = [
                ('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),
                ('file', ('audio', tf, 'audio/L16; rate=16000; channels=1'))
            ]
            r = requests.post(url, headers=headers, files=files)
            tf.close()
            # The reply is multipart: find the boundary token in the
            # Content-Type header, then keep the large part (the MP3 audio).
            for v in r.headers['content-type'].split(";"):
                if re.match('.*boundary.*', v):
                    boundary = v.split("=")[1]
            data = r.content.split(boundary)
            for d in data:
                if (len(d) >= 1024):
                    audio = d.split('\r\n\r\n')[1].rstrip('--')
            self.set_header('Content-Type', 'audio/mpeg')
            self.write(audio)
            self.finish()
# END NEW STUFF
def main():
    """Build the Tornado application and serve it over HTTPS."""
    # NOTE(review): cookie_secret is hard-coded in source; consider loading
    # it from the environment.
    settings = {
        "cookie_secret": "parisPOLANDbroadFENCEcornWOULD",
        "login_url": "/static/welcome.html",
    }
    static_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
    routes = [
        (r"/", MainHandler),
        (r"/start", StartAuthHandler),
        (r"/code", CodeAuthHandler),
        (r"/logout", LogoutHandler),
        (r"/audio", AudioHandler),
        (r"/questions", QuestionHandler),
        (r"/trigger", TriggerHandler),
        (r"/text", TextHandler),
        (r'/(favicon.ico)', tornado.web.StaticFileHandler, {'path': static_path}),
        (r'/static/(.*)', tornado.web.StaticFileHandler, {'path': static_path}),
    ]
    application = tornado.web.Application(routes, **settings)
    ssl_options = {
        'certfile' : '/etc/letsencrypt/live/trainbrain.me/fullchain.pem',
        'keyfile' : '/etc/letsencrypt/live/trainbrain.me/privkey.pem'
    }
    http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
    port = int(os.environ.get("PORT", 8080))
    http_server.listen(port)
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    main()
| |
"""Serves an Angular frontend and information about a running OpenHTF test.
This server does not currently support more than one test running in the same
process. However, the dashboard server (dashboard_server.py) can be used to
aggregate info from multiple station servers with a single frontend.
"""
import contextlib
import itertools
import json
import logging
import os
import re
import socket
import threading
import time
import types
import openhtf
from openhtf.output.servers import pub_sub
from openhtf.output.servers import web_gui_server
from openhtf.util import conf
from openhtf.util import data
from openhtf.util import functions
from openhtf.util import multicast
from openhtf.util import timeouts
import six
import sockjs.tornado
# Server-type tag — presumably reported to clients to distinguish this from
# the dashboard server; usage not visible here, TODO confirm.
STATION_SERVER_TYPE = 'station'
# Multicast discovery query payload — assumed to match openhtf.util.multicast
# conventions; verify against the discovery handler.
MULTICAST_QUERY = 'OPENHTF_DISCOVERY'
# Status value for the synthetic final state built from a finished test
# record (see _test_state_from_record below).
TEST_STATUS_COMPLETED = 'COMPLETED'
_LOG = logging.getLogger(__name__)
# Constants related to response times within the server.
_CHECK_FOR_FINISHED_TEST_POLL_S = 0.5
_DEFAULT_FRONTEND_THROTTLE_S = 0.15
_WAIT_FOR_ANY_EVENT_POLL_S = 0.05
_WAIT_FOR_EXECUTING_TEST_POLL_S = 0.1
conf.declare(
    'frontend_throttle_s',
    default_value=_DEFAULT_FRONTEND_THROTTLE_S,
    description=('Min wait time between successive updates to the '
                 'frontend.'))
conf.declare(
    'station_server_port',
    default_value=0,
    description=('Port on which to serve the app. If set to zero (the '
                 'default) then an arbitrary port will be chosen.'))
# These have default values in openhtf.util.multicast.py.
conf.declare('station_discovery_address')
conf.declare('station_discovery_port')
conf.declare('station_discovery_ttl')
def _get_executing_test():
  """Return (test, test_state) for the currently executing OpenHTF test.

  The returned test may finish at any moment after this function returns, so
  a caller that re-reads test.state is exposed to a race in which the state
  becomes None.  To avoid that, the last known state is returned alongside
  the test itself.

  Returns:
    test: The test that was executing when this function was called, or None.
    test_state: The state of the executing test, or None.
  """
  instances = list(six.itervalues(openhtf.Test.TEST_INSTANCES))
  if not instances:
    return None, None
  if len(instances) > 1:
    _LOG.warning('Station server does not support multiple executing tests.')
  current = instances[0]
  snapshot = current.state
  # state is None when the executor was created but has not started, or when
  # the test finished between listing the instances and reading the state.
  if snapshot is None:
    return None, None
  return current, snapshot
def _test_state_from_record(test_record_dict, execution_uid=None):
  """Build a frontend-compatible test state dict from a finished record.

  Args:
    test_record_dict: An OpenHTF TestRecord, converted to base types.
    execution_uid: Execution ID of the running test.

  Returns:
    Dictionary representation of a test's final state: the fields of
    TestState._asdict() plus 'execution_uid', which the frontend app needs.
  """
  state = {
      'plugs': {'plug_states': {}},
      'running_phase_state': None,
      'status': TEST_STATUS_COMPLETED,
      'test_record': test_record_dict,
  }
  state['execution_uid'] = execution_uid
  return state
def _wait_for_any_event(events, timeout_s):
  """Wait until any of the given threading.Event's is set.

  Args:
    events: List of threading.Event's.
    timeout_s: Max duration in seconds to wait before returning.

  Returns:
    True if at least one event was set before the timeout expired, else False.
  """
  def any_event_set():
    return any(event.is_set() for event in events)

  succeeded = timeouts.loop_until_timeout_or_true(
      timeout_s, any_event_set, sleep_s=_WAIT_FOR_ANY_EVENT_POLL_S)
  if succeeded:
    return True
  # Check once more so an event set right at the deadline still counts.
  return any_event_set()
class StationWatcher(threading.Thread):
  """Watches for changes in the state of the currently running OpenHTF test.
  The StationWatcher uses an event-based mechanism to detect changes in test
  state. This means we rely on the OpenHTF framework to call notify_update()
  when a change occurs. Authors of frontend-aware plugs must ensure that
  notify_update() is called when a change occurs to that plug's state.
  """
  daemon = True
  def __init__(self, update_callback):
    """Args:
      update_callback: Callable invoked with the test state dict whenever the
        state (or a frontend-aware plug's state) changes.
    """
    super(StationWatcher, self).__init__(name=type(self).__name__)
    self._update_callback = update_callback
  def run(self):
    """Call self._poll_for_update() in a loop and handle errors."""
    while True:
      try:
        self._poll_for_update()
      except RuntimeError as error:
        # Note that because logging triggers a call to notify_update(), by
        # logging a message, we automatically retry publishing the update
        # after an error occurs.
        if error.args[0] == 'dictionary changed size during iteration':
          # These errors occur occasionally and it is infeasible to get rid of
          # them entirely unless data.convert_to_base_types() is made
          # thread-safe. Ignore the error and retry quickly.
          _LOG.debug('Ignoring (probably harmless) error in station watcher: '
                     '`dictionary changed size during iteration`.')
          time.sleep(0.1)
        else:
          _LOG.exception('Error in station watcher: %s', error)
          time.sleep(1)
      except Exception as error:  # pylint: disable=broad-except
        _LOG.exception('Error in station watcher: %s', error)
        time.sleep(1)
  # The decorator rate-limits polling to the configured frontend throttle.
  @functions.call_at_most_every(float(conf.frontend_throttle_s))
  def _poll_for_update(self):
    """Call the callback with the current test state, then wait for a change."""
    test, test_state = _get_executing_test()
    if test is None:
      time.sleep(_WAIT_FOR_EXECUTING_TEST_POLL_S)
      return
    state_dict, event = self._to_dict_with_event(test_state)
    self._update_callback(state_dict)
    # Collect the change-notification events of all frontend-aware plugs so
    # a plug state change also wakes us up.
    plug_manager = test_state.plug_manager
    plug_events = [
        plug_manager.get_plug_by_class_path(plug_name).asdict_with_event()[1]
        for plug_name in plug_manager.get_frontend_aware_plug_names()
    ]
    events = [event] + plug_events
    # Wait for the test state or a plug state to change, or for the previously
    # executing test to finish.
    while not _wait_for_any_event(events, _CHECK_FOR_FINISHED_TEST_POLL_S):
      new_test, _ = _get_executing_test()
      if test != new_test:
        break
  @classmethod
  def _to_dict_with_event(cls, test_state):
    """Process a test state into the format we want to send to the frontend."""
    original_dict, event = test_state.asdict_with_event()
    # This line may produce a 'dictionary changed size during iteration' error.
    test_state_dict = data.convert_to_base_types(original_dict)
    test_state_dict['execution_uid'] = test_state.execution_uid
    return test_state_dict, event
class DashboardPubSub(sockjs.tornado.SockJSConnection):
  """WebSocket endpoint listing available stations.

  On a station server there is always exactly one available station: the one
  running this server.  dashboard_server.py implements the multi-station
  version of this endpoint.

  TODO(Kenadia): Remove this endpoint from the station server. Since the
  frontend knows whether it is running off of a station server or dashboard
  server, it should be smart enough not to look for this endpoint on the
  station server.
  """
  port = None  # Set by for_port().

  @classmethod
  def for_port(cls, port):
    """Returns a new subclass with the port set."""
    return type(cls.__name__, (cls,), {'port': port})

  def on_open(self, unused_info):
    """Called by the base class when a client connects."""
    self.send(self._make_message())

  @classmethod
  def _make_message(cls):
    """Build the station map sent to clients: {host:port -> station info}."""
    host = 'localhost'
    station_info = {
        'station_id': conf.station_id,  # From openhtf.core.test_state.
        'host': host,
        'port': cls.port,
        'status': 'ONLINE',
    }
    endpoint = '%s:%s' % (host, cls.port)
    return {endpoint: station_info}
class StationPubSub(pub_sub.PubSub):
  """WebSocket endpoint for test updates.

  Publishes information about the test currently running on this
  StationServer. Two message types are sent: 'update' and 'record', where
  'record' carries the final state of a test.
  """
  _lock = threading.Lock()  # Required by pub_sub.PubSub.
  subscribers = set()  # Required by pub_sub.PubSub.
  _last_execution_uid = None
  _last_message = None

  @classmethod
  def publish_test_record(cls, test_record):
    """Publish the final ('record') state built from a finished test record."""
    record_dict = data.convert_to_base_types(test_record)
    state_dict = _test_state_from_record(record_dict, cls._last_execution_uid)
    cls._publish_test_state(state_dict, 'record')

  @classmethod
  def publish_update(cls, test_state_dict):
    """Publish the state of the currently executing test."""
    cls._publish_test_state(test_state_dict, 'update')

  @classmethod
  def _publish_test_state(cls, test_state_dict, message_type):
    """Send a state message to all subscribers and remember it."""
    execution_uid = test_state_dict['execution_uid']
    message = {
        'state': test_state_dict,
        'test_uid': execution_uid,
        'type': message_type,
    }
    super(StationPubSub, cls).publish(message)
    # Remember what we sent so that late subscribers can catch up immediately.
    cls._last_execution_uid = execution_uid
    cls._last_message = message

  def on_subscribe(self, info):
    """Send the most recent test state to new subscribers when they connect.

    Skipped if no test is executing or nothing has been published yet.

    Args:
      info: Subscription info.
    """
    test, _ = _get_executing_test()
    if test is None or self._last_message is None:
      return
    self.send(self._last_message)
class BaseTestHandler(web_gui_server.CorsRequestHandler):
  """Base class for HTTP endpoints that get test data."""

  def get_test(self, test_uid):
    """Get the specified test. Write 404 and return None if it is not found."""
    test, test_state = _get_executing_test()
    # Only the currently executing test is addressable; anything else is 404.
    if test is not None and str(test.uid) == test_uid:
      return test, test_state
    self.write('Unknown test UID %s' % test_uid)
    self.set_status(404)
    return None, None
class AttachmentsHandler(BaseTestHandler):
  """GET endpoint for a file attached to a test."""

  def get(self, test_uid, phase_descriptor_id, attachment_name):
    _, test_state = self.get_test(test_uid)
    if test_state is None:
      return

    # Consider completed phases plus the currently running phase, if any.
    candidate_phases = list(test_state.test_record.phases)
    running_phase = test_state.running_phase_state
    if running_phase is not None:
      candidate_phases.append(running_phase.phase_record)

    # Find the phase matching `phase_descriptor_id`.
    matched_phase = next(
        (phase for phase in candidate_phases
         if str(phase.descriptor_id) == phase_descriptor_id), None)
    if matched_phase is None:
      self.write('Unknown phase descriptor %s' % phase_descriptor_id)
      self.set_status(404)
      return

    # Find the attachment matching `attachment_name`.
    if attachment_name not in matched_phase.attachments:
      self.write('Unknown attachment %s' % attachment_name)
      self.set_status(404)
      return
    attachment = matched_phase.attachments[attachment_name]

    self.set_header('Content-Type', attachment.mimetype)
    self.write(attachment.data)
class PhasesHandler(BaseTestHandler):
  """GET endpoint for phase descriptors for a test, i.e. the full phase list."""

  def get(self, test_uid):
    test, _ = self.get_test(test_uid)
    if test is None:
      return
    phase_descriptors = []
    for phase in test.descriptor.phase_sequence.all_phases():
      # Tag each descriptor with the id() of its phase object so the frontend
      # can refer back to it (matches AttachmentsHandler's descriptor_id).
      phase_descriptors.append(
          dict(id=id(phase), **data.convert_to_base_types(phase)))
    # Wrap value in a dict because writing a list directly is prohibited.
    self.write({'data': phase_descriptors})
class PlugsHandler(BaseTestHandler):
  """POST endpoints to receive plug responses from the frontend."""

  def post(self, test_uid, plug_name):
    """Invoke an allowed method on a plug of the currently executing test.

    The request body must be a JSON object with keys 'method' (name of the
    plug method to call) and 'args' (list of positional arguments).

    Responds with the JSON-serialized return value on success, or:
      400 for a malformed request body or a disallowed method.
      404 for an unknown test or plug.
      500 if the plug method itself raises.
    """
    _, test_state = self.get_test(test_uid)
    if test_state is None:
      return

    # Find the plug matching `plug_name`.
    plug = test_state.plug_manager.get_plug_by_class_path(plug_name)
    if plug is None:
      self.write('Unknown plug %s' % plug_name)
      self.set_status(404)
      return

    try:
      request = json.loads(self.request.body.decode('utf-8'))
      method_name = request['method']
      args = request['args']
    # TypeError covers bodies that are valid JSON but not an object (e.g. a
    # list or string), which previously escaped as an unhandled 500.
    except (KeyError, TypeError, ValueError):
      self.write('Malformed JSON request.')
      self.set_status(400)
      return

    method = getattr(plug, method_name, None)
    # Only public methods of plugs that opted in to remote access may be
    # called, and never those explicitly disabled via disable_remote_attrs.
    if not (plug.enable_remote and isinstance(method, types.MethodType) and
            not method_name.startswith('_') and
            method_name not in plug.disable_remote_attrs):
      self.write('Cannot access method %s of plug %s.' %
                 (method_name, plug_name))
      self.set_status(400)
      return

    try:
      response = json.dumps(method(*args))
    except Exception as e:  # pylint: disable=broad-except
      self.write('Plug error: %s' % repr(e))
      self.set_status(500)
    else:
      self.write(response)
class BaseHistoryHandler(web_gui_server.CorsRequestHandler):
  """Base class for endpoints serving test records from the history folder."""
  history_path = None

  def initialize(self, history_path):
    # Tornado calls initialize() with the kwargs dict supplied in the route
    # spec (see StationServer.__init__).
    self.history_path = history_path
class HistoryListHandler(BaseHistoryHandler):
  """GET endpoint for the list of tests in the history.

  When requesting the history list, we respond with all files in the history
  folder ending with the '.pb' extension. Ideally, file names should match the
  following form (see chtf.py):

    'mfg_event_{dut_id}_{start_time_millis}.pb'

  The requester can filter the returned history items by passing DUT ID and/or
  start time as query parameters.
  """

  def get(self):
    filter_dut_id = self.get_arguments('dutId')
    filter_start_time_millis = self.get_arguments('startTimeMillis')
    file_name_pattern = re.compile(r'mfg_event_(.+)_(\d+)\.pb$')
    history_items = []

    for file_name in os.listdir(self.history_path):
      full_path = os.path.join(self.history_path, file_name)
      if not (file_name.endswith('.pb') and os.path.isfile(full_path)):
        continue

      # Extract DUT ID and start time from the conventional file name, when
      # it matches; otherwise both stay None.
      match = file_name_pattern.match(file_name)
      if match is None:
        dut_id = None
        start_time_millis = None
      else:
        dut_id = match.group(1)
        start_time_millis = int(match.group(2))

      if filter_dut_id and dut_id not in filter_dut_id:
        continue
      if (filter_start_time_millis and
          str(start_time_millis) not in filter_start_time_millis):
        continue

      history_items.append({
          'dut_id': dut_id,
          'file_name': file_name,
          'start_time_millis': start_time_millis,
      })

    # Wrap value in a dict because writing a list directly is prohibited.
    self.write({'data': history_items})
class HistoryItemHandler(BaseHistoryHandler):
  """GET endpoint for a test record from the history."""

  def get(self, file_name):
    """Placeholder: always responds with status 500 until implemented."""
    # TODO(kenadia): Implement the history item handler. The implementation
    # depends on the format used to store test records on disk.
    self.write('Not implemented.')
    self.set_status(500)
class HistoryAttachmentsHandler(BaseHistoryHandler):
  """GET endpoint for an attachment from an MfgEvent in the history.

  The sha1 query parameter is optional and used as a backup to identify an
  attachment if the name does not match any known name. Including this
  parameter is recommended, as some systems may modify attachment names when
  storing them on the MfgEvent in the case where multiple attachments have the
  same name.
  """

  def get(self, file_name, attachment_name):
    """Placeholder: always responds with status 500 until implemented."""
    # TODO(kenadia): Implement the history item handler. The implementation
    # depends on the format used to store test records on disk.
    self.write('Not implemented.')
    self.set_status(500)
class StationMulticast(multicast.MulticastListener):
  """Announce the existence of a station server to any searching dashboards."""

  def __init__(self, station_server_port):
    # Only forward discovery settings that are explicitly configured; the
    # rest fall back to the defaults in openhtf.util.multicast.py.
    kwargs = {}
    for attr in ('address', 'port', 'ttl'):
      conf_key = 'station_discovery_%s' % attr
      if conf_key in conf:
        kwargs[attr] = conf[conf_key]
    super(StationMulticast, self).__init__(self._make_message, **kwargs)
    self.station_server_port = station_server_port

  def _make_message(self, message):
    """Build the JSON reply to a dashboard discovery query, or None."""
    if message != MULTICAST_QUERY:
      if message == 'OPENHTF_PING':
        # Don't log for the old multicast string.
        return
      _LOG.debug('Got unexpected traffic on multicast socket: %s', message)
      return

    _, test_state = _get_executing_test()
    if test_state:
      metadata = test_state.test_record.metadata
      cell = metadata.get('cell')
      test_description = metadata.get('test_description')
      test_name = metadata.get('test_name')
    else:
      cell = test_description = test_name = None

    return json.dumps({
        'cell': cell,
        'port': self.station_server_port,
        'station_id': conf.station_id,  # From openhtf.core.test_state.
        'test_description': test_description,
        'test_name': test_name,
    })
class StationServer(web_gui_server.WebGuiServer):
  """Provides endpoints for interacting with an OpenHTF test.

  Also serves an Angular frontend that interfaces with those endpoints.

  Can be used as a context manager to ensure the server is stopped cleanly:

    with StationServer(history_path) as server:
      test = openhtf.Test(*my_phases)
      test.add_output_callbacks(server.publish_final_state)
      test.execute()

  Can also be used via the maybe_run() helper function:

    with maybe_run(should_run, history_path) as server:
      test = openhtf.Test(*my_phases)
      if server:
        test.add_output_callbacks(server.publish_final_state)
      test.execute()
  """

  def __init__(self, history_path=None):
    # Disable tornado's logging.
    # TODO(kenadia): Enable these logs if verbosity flag is at least -vvv.
    # I think this will require changing how StoreRepsInModule works.
    # Currently, if we call logs.ARG_PARSER.parse_known_args() multiple
    # times, we multiply the number of v's that we get.
    tornado_logger = logging.getLogger('tornado')
    tornado_logger.propagate = False
    if not tornado_logger.handlers:
      tornado_logger.addHandler(logging.NullHandler())
    # Bind port early so that the correct port number can be used in the routes.
    sockets, port = web_gui_server.bind_port(int(conf.station_server_port))
    # Set up the station watcher, which pushes executing-test state changes to
    # StationPubSub subscribers.
    station_watcher = StationWatcher(StationPubSub.publish_update)
    station_watcher.start()
    # Set up the SockJS endpoints. The dashboard endpoint needs the bound port
    # baked into its class (see DashboardPubSub.for_port).
    dashboard_class = DashboardPubSub.for_port(port)
    dash_router = sockjs.tornado.SockJSRouter(dashboard_class, '/sub/dashboard')
    station_router = sockjs.tornado.SockJSRouter(StationPubSub, '/sub/station')
    routes = dash_router.urls + station_router.urls
    # Set up the other endpoints.
    routes.extend((
        (r'/tests/(?P<test_uid>[\w\d:]+)/phases', PhasesHandler),
        (r'/tests/(?P<test_uid>[\w\d:]+)/plugs/(?P<plug_name>.+)',
         PlugsHandler),
        (r'/tests/(?P<test_uid>[\w\d:]+)/phases/(?P<phase_descriptor_id>\d+)/'
         'attachments/(?P<attachment_name>.+)', AttachmentsHandler),
    ))
    # Optionally enable history from disk. The dict third element is passed to
    # the handlers' initialize() (see BaseHistoryHandler).
    if history_path is not None:
      routes.extend((
          (r'/history', HistoryListHandler, {
              'history_path': history_path
          }),
          (r'/history/(?P<file_name>[^/]+)', HistoryItemHandler, {
              'history_path': history_path
          }),
          (r'/history/(?P<file_name>[^/]+)/attachments/(?P<attachment_name>.+)',
           HistoryAttachmentsHandler, {
               'history_path': history_path
           }),
      ))
    super(StationServer, self).__init__(routes, port, sockets=sockets)
    self.station_multicast = StationMulticast(port)

  def _get_config(self):
    # Served to the frontend so it can tell a station server apart from a
    # dashboard server.
    return {
        'server_type': STATION_SERVER_TYPE,
    }

  def run(self):
    """Start multicast discovery, then run the web server."""
    _LOG.info('Announcing station server via multicast on %s:%s',
              self.station_multicast.address, self.station_multicast.port)
    self.station_multicast.start()
    _LOG.info('Starting station server at:\n'  # pylint: disable=logging-format-interpolation
              '  Local: http://localhost:{port}\n'
              '  Remote: http://{host}:{port}'.format(
                  host=socket.gethostname(), port=self.port))
    super(StationServer, self).run()

  def stop(self):
    """Stop the web server, then the multicast announcer."""
    _LOG.info('Stopping station server.')
    super(StationServer, self).stop()
    _LOG.info('Stopping multicast.')
    self.station_multicast.stop(timeout_s=0)

  def publish_final_state(self, test_record):
    """Test output callback publishing a final state from the test record."""
    StationPubSub.publish_test_record(test_record)
@contextlib.contextmanager
def maybe_run(should_run, history_path=None):
  """Provides a context which conditionally runs a StationServer.

  Args:
    should_run: If falsy, yield None without starting a server.
    history_path: Optional history directory forwarded to StationServer.

  Yields:
    The running StationServer, or None when should_run is falsy.
  """
  if should_run:
    with StationServer(history_path) as server:
      yield server
  else:
    yield
| |
# -*- test-case-name: twisted.test.test_factories,twisted.internet.test.test_protocol -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standard implementations of Twisted protocol-related interfaces.
Start here if you are looking to write a new protocol implementation for
Twisted. The Protocol class contains some introductory material.
"""
from __future__ import division, absolute_import
import random
from zope.interface import implementer
from twisted.python import log, failure, components
from twisted.internet import interfaces, error, defer
@implementer(interfaces.IProtocolFactory, interfaces.ILoggingContext)
class Factory:
    """
    This is a factory which produces protocols.

    By default, buildProtocol will create a protocol of the class given in
    self.protocol.
    """

    # put a subclass of Protocol here:
    protocol = None

    numPorts = 0
    noisy = True

    @classmethod
    def forProtocol(cls, protocol, *args, **kwargs):
        """
        Create a factory for the given protocol.

        It sets the C{protocol} attribute and returns the constructed factory
        instance.

        @param protocol: A L{Protocol} subclass
        @param args: Positional arguments for the factory.
        @param kwargs: Keyword arguments for the factory.
        @return: A L{Factory} instance wired up to C{protocol}.
        """
        instance = cls(*args, **kwargs)
        instance.protocol = protocol
        return instance

    def logPrefix(self):
        """
        Describe this factory for log messages.
        """
        return self.__class__.__name__

    def doStart(self):
        """Make sure startFactory is called.

        Users should not call this function themselves!
        """
        if self.numPorts == 0:
            # First port: run the one-time start hook (logging it if noisy).
            if self.noisy:
                log.msg("Starting factory %r" % self)
            self.startFactory()
        self.numPorts += 1

    def doStop(self):
        """Make sure stopFactory is called.

        Users should not call this function themselves!
        """
        if self.numPorts == 0:
            # this shouldn't happen, but does sometimes and this is better
            # than blowing up in assert as we did previously.
            return
        self.numPorts -= 1
        if self.numPorts == 0:
            # Last port gone: run the one-time stop hook.
            if self.noisy:
                log.msg("Stopping factory %r" % self)
            self.stopFactory()

    def startFactory(self):
        """This will be called before I begin listening on a Port or Connector.

        It will only be called once, even if the factory is connected
        to multiple ports.

        This can be used to perform 'unserialization' tasks that
        are best put off until things are actually running, such
        as connecting to a database, opening files, etcetera.
        """

    def stopFactory(self):
        """This will be called before I stop listening on all Ports/Connectors.

        This can be overridden to perform 'shutdown' tasks such as disconnecting
        database connections, closing files, etc.

        It will be called, for example, before an application shuts down,
        if it was connected to a port. User code should not call this function
        directly.
        """

    def buildProtocol(self, addr):
        """Create an instance of a subclass of Protocol.

        The returned instance will handle input on an incoming server
        connection, and an attribute \"factory\" pointing to the creating
        factory.

        Override this method to alter how Protocol instances get created.

        @param addr: an object implementing L{twisted.internet.interfaces.IAddress}
        """
        proto = self.protocol()
        proto.factory = self
        return proto
class ClientFactory(Factory):
    """A Protocol factory for clients.

    This can be used together with the various connectXXX methods in
    reactors. All three callbacks below are no-op hooks for subclasses to
    override; the base class takes no action.
    """

    def startedConnecting(self, connector):
        """Called when a connection has been started.

        You can call connector.stopConnecting() to stop the connection attempt.

        @param connector: a Connector object.
        """

    def clientConnectionFailed(self, connector, reason):
        """Called when a connection has failed to connect.

        It may be useful to call connector.connect() - this will reconnect.

        @type reason: L{twisted.python.failure.Failure}
        """

    def clientConnectionLost(self, connector, reason):
        """Called when an established connection is lost.

        It may be useful to call connector.connect() - this will reconnect.

        @type reason: L{twisted.python.failure.Failure}
        """
class _InstanceFactory(ClientFactory):
    """
    Factory used by ClientCreator.

    @ivar deferred: The L{Deferred} which represents this connection attempt and
        which will be fired when it succeeds or fails.

    @ivar pending: After a connection attempt succeeds or fails, a delayed call
        which will fire the L{Deferred} representing this connection attempt.
    """

    noisy = False
    pending = None

    def __init__(self, reactor, instance, deferred):
        self.reactor = reactor
        self.instance = instance
        self.deferred = deferred

    def __repr__(self):
        return "<ClientCreator factory: %r>" % (self.instance, )

    def buildProtocol(self, addr):
        """
        Return the pre-constructed protocol instance and arrange to fire the
        waiting L{Deferred} to indicate success establishing the connection.
        """
        # Take ownership of the Deferred before scheduling delivery so a
        # later callback cannot fire it twice.
        deferred, self.deferred = self.deferred, None
        self.pending = self.reactor.callLater(
            0, self.fire, deferred.callback, self.instance)
        return self.instance

    def clientConnectionFailed(self, connector, reason):
        """
        Arrange to fire the waiting L{Deferred} with the given failure to
        indicate the connection could not be established.
        """
        deferred, self.deferred = self.deferred, None
        self.pending = self.reactor.callLater(
            0, self.fire, deferred.errback, reason)

    def fire(self, func, value):
        """
        Clear C{self.pending} to avoid a reference cycle and then invoke func
        with the value.
        """
        self.pending = None
        func(value)
class ClientCreator:
    """
    Client connections that do not require a factory.

    The various connect* methods create a protocol instance using the given
    protocol class and arguments, and connect it, returning a Deferred of the
    resulting protocol instance.

    Useful for cases when we don't really need a factory.  Mainly this
    is when there is no shared state between protocol instances, and no need
    to reconnect.

    The C{connectTCP}, C{connectUNIX}, and C{connectSSL} methods each return a
    L{Deferred} which will fire with an instance of the protocol class passed to
    L{ClientCreator.__init__}.  These Deferred can be cancelled to abort the
    connection attempt (in a very unlikely case, cancelling the Deferred may not
    prevent the protocol from being instantiated and connected to a transport;
    if this happens, it will be disconnected immediately afterwards and the
    Deferred will still errback with L{CancelledError}).
    """

    def __init__(self, reactor, protocolClass, *args, **kwargs):
        self.reactor = reactor
        self.protocolClass = protocolClass
        self.args = args
        self.kwargs = kwargs

    def _connect(self, method, *args, **kwargs):
        """
        Initiate a connection attempt.

        @param method: A callable which will actually start the connection
            attempt.  For example, C{reactor.connectTCP}.

        @param *args: Positional arguments to pass to C{method}, excluding the
            factory.

        @param **kwargs: Keyword arguments to pass to C{method}.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        def cancelConnect(deferred):
            # Cancellation hook: abort the in-progress connection attempt and
            # cancel the pending result delivery, if any. `connector` and `f`
            # are closed over from the enclosing scope; by the time the
            # Deferred can be cancelled, both names are bound.
            connector.disconnect()
            if f.pending is not None:
                f.pending.cancel()
        d = defer.Deferred(cancelConnect)
        f = _InstanceFactory(
            self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
        connector = method(factory=f, *args, **kwargs)
        return d

    def connectTCP(self, host, port, timeout=30, bindAddress=None):
        """
        Connect to a TCP server.

        The parameters are all the same as to L{IReactorTCP.connectTCP} except
        that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectTCP, host, port, timeout=timeout,
            bindAddress=bindAddress)

    def connectUNIX(self, address, timeout=30, checkPID=False):
        """
        Connect to a Unix socket.

        The parameters are all the same as to L{IReactorUNIX.connectUNIX} except
        that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectUNIX, address, timeout=timeout,
            checkPID=checkPID)

    def connectSSL(self, host, port, contextFactory, timeout=30, bindAddress=None):
        """
        Connect to an SSL server.

        The parameters are all the same as to L{IReactorSSL.connectSSL} except
        that the factory parameter is omitted.

        @return: A L{Deferred} which fires with an instance of the protocol
            class passed to this L{ClientCreator}'s initializer or fails if the
            connection cannot be set up for some reason.
        """
        return self._connect(
            self.reactor.connectSSL, host, port,
            contextFactory=contextFactory, timeout=timeout,
            bindAddress=bindAddress)
class ReconnectingClientFactory(ClientFactory):
    """
    Factory which auto-reconnects clients with an exponential back-off.

    Note that clients should call my resetDelay method after they have
    connected successfully.

    @ivar maxDelay: Maximum number of seconds between connection attempts.
    @ivar initialDelay: Delay for the first reconnection attempt.
    @ivar factor: A multiplicitive factor by which the delay grows
    @ivar jitter: Percentage of randomness to introduce into the delay length
        to prevent stampeding.
    @ivar clock: The clock used to schedule reconnection. It's mainly useful to
        be parametrized in tests. If the factory is serialized, this attribute
        will not be serialized, and the default value (the reactor) will be
        restored when deserialized.
    @type clock: L{IReactorTime}
    @ivar maxRetries: Maximum number of consecutive unsuccessful connection
        attempts, after which no further connection attempts will be made. If
        this is not explicitly set, no maximum is applied.
    """
    maxDelay = 3600
    initialDelay = 1.0
    # Note: These highly sensitive factors have been precisely measured by
    # the National Institute of Science and Technology.  Take extreme care
    # in altering them, or you may damage your Internet!
    # (Seriously: <http://physics.nist.gov/cuu/Constants/index.html>)
    factor = 2.7182818284590451 # (math.e)
    # Phi = 1.6180339887498948 # (Phi is acceptable for use as a
    # factor if e is too large for your application.)
    jitter = 0.11962656472 # molar Planck constant times c, joule meter/mole

    # Mutable reconnection state; cleared by __getstate__ on serialization.
    delay = initialDelay
    retries = 0
    maxRetries = None
    _callID = None
    connector = None
    clock = None

    continueTrying = 1

    def clientConnectionFailed(self, connector, reason):
        if self.continueTrying:
            self.connector = connector
            self.retry()

    def clientConnectionLost(self, connector, unused_reason):
        if self.continueTrying:
            self.connector = connector
            self.retry()

    def retry(self, connector=None):
        """
        Have this connector connect again, after a suitable delay.
        """
        if not self.continueTrying:
            if self.noisy:
                log.msg("Abandoning %s on explicit request" % (connector,))
            return

        if connector is None:
            if self.connector is None:
                raise ValueError("no connector to retry")
            else:
                connector = self.connector

        self.retries += 1
        if self.maxRetries is not None and (self.retries > self.maxRetries):
            if self.noisy:
                log.msg("Abandoning %s after %d retries." %
                        (connector, self.retries))
            return

        # Grow the delay exponentially (capped at maxDelay), then jitter it
        # with a normal distribution to avoid reconnection stampedes.
        self.delay = min(self.delay * self.factor, self.maxDelay)
        if self.jitter:
            self.delay = random.normalvariate(self.delay,
                                              self.delay * self.jitter)

        if self.noisy:
            # %d truncates the float delay in the log message only.
            log.msg("%s will retry in %d seconds" % (connector, self.delay,))

        def reconnector():
            self._callID = None
            connector.connect()

        if self.clock is None:
            from twisted.internet import reactor
            self.clock = reactor
        self._callID = self.clock.callLater(self.delay, reconnector)

    def stopTrying(self):
        """
        Put a stop to any attempt to reconnect in progress.
        """
        # ??? Is this function really stopFactory?
        if self._callID:
            self._callID.cancel()
            self._callID = None
        self.continueTrying = 0
        if self.connector:
            try:
                self.connector.stopConnecting()
            except error.NotConnectingError:
                pass

    def resetDelay(self):
        """
        Call this method after a successful connection: it resets the delay and
        the retry counter.
        """
        self.delay = self.initialDelay
        self.retries = 0
        self._callID = None
        self.continueTrying = 1

    def __getstate__(self):
        """
        Remove all of the state which is mutated by connection attempts and
        failures, returning just the state which describes how reconnections
        should be attempted.  This will make the unserialized instance
        behave just as this one did when it was first instantiated.
        """
        state = self.__dict__.copy()
        for key in ['connector', 'retries', 'delay',
                    'continueTrying', '_callID', 'clock']:
            if key in state:
                del state[key]
        return state
class ServerFactory(Factory):
    """Subclass this to indicate that your protocol.Factory is only usable for servers.

    Adds no behavior over L{Factory}; it exists purely as a marker type.
    """
class BaseProtocol:
    """
    This is the abstract superclass of all protocols.

    Some methods have helpful default implementations here so that they can
    easily be shared, but otherwise the direct subclasses of this class are more
    interesting, L{Protocol} and L{ProcessProtocol}.
    """

    connected = 0
    transport = None

    def makeConnection(self, transport):
        """Make a connection to a transport and a server.

        Stores *transport* on this protocol, marks it connected, and then
        fires the connectionMade() callback.
        """
        self.transport = transport
        self.connected = 1
        self.connectionMade()

    def connectionMade(self):
        """Called when a connection is made.

        This may be considered the initializer of the protocol, because
        it is called when the connection is completed.  For clients,
        this is called once the connection to the server has been
        established; for servers, this is called after an accept() call
        stops blocking and a socket has been received.  If you need to
        send any greeting or initial message, do it here.
        """
# Module-level singleton Failure used as the default `reason` for
# Protocol.connectionLost when the connection closed cleanly. cleanFailure()
# strips the captured traceback so this long-lived instance does not pin
# stack frames in memory.
connectionDone=failure.Failure(error.ConnectionDone())
connectionDone.cleanFailure()
@implementer(interfaces.IProtocol, interfaces.ILoggingContext)
class Protocol(BaseProtocol):
    """
    This is the base class for streaming connection-oriented protocols.

    If you are going to write a new connection-oriented protocol for Twisted,
    start here.  Any protocol implementation, either client or server, should
    be a subclass of this class.

    The API is quite simple.  Implement L{dataReceived} to handle both
    event-based and synchronous input; output can be sent through the
    'transport' attribute, which is to be an instance that implements
    L{twisted.internet.interfaces.ITransport}.  Override C{connectionLost} to be
    notified when the connection ends.

    Some subclasses exist already to help you write common types of protocols:
    see the L{twisted.protocols.basic} module for a few of them.
    """

    def logPrefix(self):
        """
        Return a prefix matching the class name, to identify log messages
        related to this protocol instance.
        """
        return self.__class__.__name__

    def dataReceived(self, data):
        """Called whenever data is received.

        Use this method to translate to a higher-level message.  Usually, some
        callback will be made upon the receipt of each complete protocol
        message.

        @param data: a string of indeterminate length.  Please keep in mind
            that you will probably need to buffer some data, as partial
            (or multiple) protocol messages may be received!  I recommend
            that unit tests for protocols call through to this method with
            differing chunk sizes, down to one byte at a time.
        """

    def connectionLost(self, reason=connectionDone):
        """Called when the connection is shut down.

        Clear any circular references here, and any external references
        to this Protocol.  The connection has been closed.

        The default `reason` is the module-level C{connectionDone} Failure,
        representing a clean close.

        @type reason: L{twisted.python.failure.Failure}
        """
@implementer(interfaces.IConsumer)
class ProtocolToConsumerAdapter(components.Adapter):
    """
    Adapts an L{IProtocol} provider to L{IConsumer}: data written to the
    consumer is delivered to the wrapped protocol's C{dataReceived}.
    Producer registration is accepted but ignored.
    """

    def write(self, data):
        self.original.dataReceived(data)

    def registerProducer(self, producer, streaming):
        pass

    def unregisterProducer(self):
        pass

# Register so IConsumer(protocol) adaptation works transparently.
components.registerAdapter(ProtocolToConsumerAdapter, interfaces.IProtocol,
                           interfaces.IConsumer)
@implementer(interfaces.IProtocol)
class ConsumerToProtocolAdapter(components.Adapter):
    """
    Adapts an L{IConsumer} provider to L{IProtocol}: received data is written
    to the wrapped consumer. Connection lifecycle callbacks are no-ops.
    """

    def dataReceived(self, data):
        self.original.write(data)

    def connectionLost(self, reason):
        pass

    def makeConnection(self, transport):
        pass

    def connectionMade(self):
        pass

# Register so IProtocol(consumer) adaptation works transparently.
components.registerAdapter(ConsumerToProtocolAdapter, interfaces.IConsumer,
                           interfaces.IProtocol)
@implementer(interfaces.IProcessProtocol)
class ProcessProtocol(BaseProtocol):
    """
    Base process protocol implementation which does simple dispatching for
    stdin, stdout, and stderr file descriptors.
    """

    def childDataReceived(self, childFD, data):
        """
        Dispatch data arriving on the child's stdout (fd 1) or stderr (fd 2);
        data on any other descriptor is silently ignored.
        """
        handler = {1: self.outReceived, 2: self.errReceived}.get(childFD)
        if handler is not None:
            handler(data)

    def outReceived(self, data):
        """
        Some data was received from stdout.
        """

    def errReceived(self, data):
        """
        Some data was received from stderr.
        """

    def childConnectionLost(self, childFD):
        """
        Dispatch the close of the child's stdin (fd 0), stdout (fd 1), or
        stderr (fd 2); other descriptors are silently ignored.
        """
        handler = {
            0: self.inConnectionLost,
            1: self.outConnectionLost,
            2: self.errConnectionLost,
        }.get(childFD)
        if handler is not None:
            handler()

    def inConnectionLost(self):
        """
        This will be called when stdin is closed.
        """

    def outConnectionLost(self):
        """
        This will be called when stdout is closed.
        """

    def errConnectionLost(self):
        """
        This will be called when stderr is closed.
        """

    def processExited(self, reason):
        """
        This will be called when the subprocess exits.

        @type reason: L{twisted.python.failure.Failure}
        """

    def processEnded(self, reason):
        """
        Called when the child process exits and all file descriptors
        associated with it have been closed.

        @type reason: L{twisted.python.failure.Failure}
        """
class AbstractDatagramProtocol:
    """
    Abstract protocol for datagram-oriented transports, e.g. IP, ICMP, ARP, UDP.

    @ivar transport: The transport this protocol is attached to, or C{None}.
    @ivar numPorts: Number of ports currently connected; the start/stop hooks
        fire only on the 0 -> 1 and 1 -> 0 transitions.
    @ivar noisy: Whether to log protocol start/stop messages.
    """

    transport = None
    numPorts = 0
    noisy = True

    def __getstate__(self):
        """
        Serialize without the transport, which is re-established on
        connection and should not be pickled.
        """
        d = self.__dict__.copy()
        d['transport'] = None
        return d

    def doStart(self):
        """Make sure startProtocol is called.

        This will be called by makeConnection(), users should not call it.
        """
        if not self.numPorts:
            if self.noisy:
                log.msg("Starting protocol %s" % self)
            self.startProtocol()
        self.numPorts += 1

    def doStop(self):
        """Make sure stopProtocol is called.

        This will be called by the port, users should not call it.
        """
        assert self.numPorts > 0
        self.numPorts -= 1
        self.transport = None
        if not self.numPorts:
            if self.noisy:
                log.msg("Stopping protocol %s" % self)
            self.stopProtocol()

    def startProtocol(self):
        """Called when a transport is connected to this protocol.

        Will only be called once, even if multiple ports are connected.
        """

    def stopProtocol(self):
        """Called when the transport is disconnected.

        Will only be called once, after all ports are disconnected.
        """

    def makeConnection(self, transport):
        """Make a connection to a transport and a server.

        This sets the 'transport' attribute of this DatagramProtocol, and calls the
        doStart() callback.
        """
        # Identity comparison (was `== None`, PEP 8 E711): an equality check
        # could be fooled by a transport whose __eq__ compares equal to None.
        assert self.transport is None
        self.transport = transport
        self.doStart()

    def datagramReceived(self, datagram, addr):
        """Called when a datagram is received.

        @param datagram: the string received from the transport.
        @param addr: tuple of source of datagram.
        """
@implementer(interfaces.ILoggingContext)
class DatagramProtocol(AbstractDatagramProtocol):
    """
    Protocol for datagram-oriented transport, e.g. UDP.

    @type transport: C{NoneType} or
        L{IUDPTransport<twisted.internet.interfaces.IUDPTransport>} provider
    @ivar transport: The transport with which this protocol is associated,
        if it is associated with one.
    """

    def logPrefix(self):
        """
        Return a prefix matching the class name, to identify log messages
        related to this protocol instance.
        """
        return self.__class__.__name__

    def connectionRefused(self):
        """Called due to error from write in connected mode.

        Note this is a result of ICMP message generated by *previous*
        write.
        """
class ConnectedDatagramProtocol(DatagramProtocol):
    """Protocol for connected datagram-oriented transport.

    No longer necessary for UDP.
    """

    def datagramReceived(self, datagram):
        """Called when a datagram is received.

        Unlike DatagramProtocol.datagramReceived, no source address is passed
        since the transport is connected to a fixed peer.

        @param datagram: the string received from the transport.
        """

    def connectionFailed(self, failure):
        """Called if connecting failed.

        Usually this will be due to a DNS lookup failure.
        """
@implementer(interfaces.ITransport)
class FileWrapper:
    """A wrapper around a file-like object to make it behave as a Transport.

    This doesn't actually stream the file to the attached protocol,
    and is thus useful mainly as a utility for debugging protocols.
    """

    closed = 0
    disconnecting = 0
    producer = None
    streamingProducer = 0

    def __init__(self, file):
        self.file = file

    def write(self, data):
        """Write data to the wrapped file, routing errors to handleException.

        Narrowed from a bare `except:` so that KeyboardInterrupt and
        SystemExit are no longer swallowed.
        """
        try:
            self.file.write(data)
        except Exception:
            self.handleException()
        # self._checkProducer()

    def _checkProducer(self):
        # Cheating; this is called at "idle" times to allow producers to be
        # found and dealt with
        if self.producer:
            self.producer.resumeProducing()

    def registerProducer(self, producer, streaming):
        """From abstract.FileDescriptor
        """
        self.producer = producer
        self.streamingProducer = streaming
        if not streaming:
            producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def stopConsuming(self):
        self.unregisterProducer()
        self.loseConnection()

    def writeSequence(self, iovec):
        self.write("".join(iovec))

    def loseConnection(self):
        self.closed = 1
        try:
            self.file.close()
        except (IOError, OSError):
            self.handleException()

    def getPeer(self):
        # XXX: According to ITransport, this should return an IAddress!
        return 'file', 'file'

    def getHost(self):
        # XXX: According to ITransport, this should return an IAddress!
        return 'file'

    def handleException(self):
        # Default: silently ignore write/close errors; override to report.
        pass

    def resumeProducing(self):
        # Never sends data anyways
        pass

    def pauseProducing(self):
        # Never sends data anyways
        pass

    def stopProducing(self):
        self.loseConnection()
# Public API of this module: `from twisted.internet.protocol import *`
# exposes only these names.
__all__ = ["Factory", "ClientFactory", "ReconnectingClientFactory", "connectionDone",
           "Protocol", "ProcessProtocol", "FileWrapper", "ServerFactory",
           "AbstractDatagramProtocol", "DatagramProtocol", "ConnectedDatagramProtocol",
           "ClientCreator"]
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment functions."""
import ast
import functools
import os
import re
import socket
import subprocess
import sys
import six
import yaml
from clusterfuzz._internal import fuzzing
# Tools supporting customization of options via ADDITIONAL_{TOOL_NAME}_OPTIONS.
# FIXME: Support ADDITIONAL_UBSAN_OPTIONS and ADDITIONAL_LSAN_OPTIONS in an
# ASAN instrumented build.
SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS = [
    'HWASAN', 'ASAN', 'KASAN', 'CFI', 'MSAN', 'TSAN', 'UBSAN'
]

# Maps the short memory tool name to the sanitizer name used for display.
SANITIZER_NAME_MAP = {
    'ASAN': 'address',
    'CFI': 'cfi',
    'MSAN': 'memory',
    'TSAN': 'thread',
    'UBSAN': 'undefined',
}

# Options applied to every sanitizer's *_OPTIONS environment variable so that
# crashes from signals are caught and summarized consistently.
COMMON_SANITIZER_OPTIONS = {
    'handle_abort': 1,
    'handle_segv': 1,
    'handle_sigbus': 1,
    'handle_sigfpe': 1,
    'handle_sigill': 1,
    'print_summary': 1,
    'use_sigaltstack': 1,
}
def _eval_value(value_string):
"""Returns evaluated value."""
try:
return ast.literal_eval(value_string)
except:
# String fallback.
return value_string
def join_memory_tool_options(options):
  """Serialize a memory tool options dict into a colon-separated
  'key=value' string suitable for a *SAN_OPTIONS environment variable.
  Keys are emitted in sorted order for determinism."""
  pairs = ('%s=%s' % (name, value) for name, value in sorted(options.items()))
  return ':'.join(pairs)
def _maybe_convert_to_int(value):
"""Returns the int representation contained by string |value| if it contains
one. Otherwise returns |value|."""
try:
return int(value)
except ValueError:
return value
# Matches anything that isn't an unquoted (ie: not between two single or two
# double quotes) colon.
UNQUOTED_COLON_REGEX = re.compile('((?:[^\'":]|\'[^\']*\'|"[^"]*")+)')
def _parse_memory_tool_options(options_str):
"""Parses memory tool options into a dict."""
parsed = {}
for item in UNQUOTED_COLON_REGEX.split(options_str):
# Regex split can give us empty strings at the beginning and the end. Skip
# these.
if not item:
continue
# Regex split gives us each ':'. Skip these.
if item == ':':
continue
values = item.split('=', 1)
if len(values) != 2:
# TODO(mbarbella): Factor this out of environment, and switch to logging
# an error and continuing. This error should be recoverable.
raise ValueError('Invalid memory tool option "%s"' % item)
option_name = values[0]
option_value = _maybe_convert_to_int(values[1])
parsed[option_name] = option_value
return parsed
def _quote_value_if_needed(value):
"""Quote environment value as needed for certain platforms like Windows."""
result = value
if ' ' in result or ':' in result:
result = '"%s"' % result
return result
def copy():
"""Return a safe copy of the environment."""
environment_copy = os.environ.copy()
return environment_copy
def get_asan_options(redzone_size, malloc_context_size, quarantine_size_mb,
                     bot_platform, leaks, disable_ubsan):
  """Generates default ASAN options.

  Also mutates the environment as a side effect: sets REDZONE, sets or
  removes LSAN_OPTIONS, and sets UBSAN_OPTIONS when UBSAN is enabled.

  Args:
    redzone_size: redzone bytes to request, or falsy to use the default.
    malloc_context_size: max stack frames to report, or falsy for default.
    quarantine_size_mb: quarantine size in MB, or falsy for default.
    bot_platform: platform string (e.g. 'WINDOWS', 'LINUX', 'ANDROID').
    leaks: whether leak detection may be enabled (combined with LSAN env).
    disable_ubsan: if True, use the UBSAN-disabled option set.

  Returns:
    Dict of ASAN option name -> value.
  """
  asan_options = {}
  # Default options needed for all cases.
  asan_options['alloc_dealloc_mismatch'] = 0
  asan_options['print_scariness'] = 1
  asan_options['strict_memcmp'] = 0
  # Set provided redzone size.
  if redzone_size:
    asan_options['redzone'] = redzone_size
    # This value is used in determining whether to report OOM crashes or not.
    set_value('REDZONE', redzone_size)
  # Set maximum number of stack frames to report.
  if malloc_context_size:
    asan_options['malloc_context_size'] = malloc_context_size
  # Set quarantine size.
  if quarantine_size_mb:
    asan_options['quarantine_size_mb'] = quarantine_size_mb
  # Test for leaks if this is an LSan-enabled job type.
  if get_value('LSAN') and leaks:
    lsan_options = join_memory_tool_options(get_lsan_options())
    set_value('LSAN_OPTIONS', lsan_options)
    asan_options['detect_leaks'] = 1
  else:
    # Ensure stale LSAN_OPTIONS from a previous job do not leak through.
    remove_key('LSAN_OPTIONS')
    asan_options['detect_leaks'] = 0
  # FIXME: Support container overflow on Android.
  if is_android(bot_platform):
    asan_options['detect_container_overflow'] = 0
  # Enable stack use-after-return.
  asan_options['detect_stack_use_after_return'] = 1
  asan_options['max_uar_stack_size_log'] = 16
  # Other less important default options for all cases.
  asan_options.update({
      'allocator_may_return_null': 1,
      'allow_user_segv_handler': 0,
      'check_malloc_usable_size': 0,
      'detect_odr_violation': 0,
      'fast_unwind_on_fatal': 1,
      'print_suppressions': 0,
  })
  # Add common sanitizer options.
  asan_options.update(COMMON_SANITIZER_OPTIONS)
  # FIXME: For Windows, rely on online symbolization since llvm-symbolizer.exe
  # in build archive does not work.
  asan_options['symbolize'] = int(bot_platform == 'WINDOWS')
  # For Android, allow user defined segv handler to work.
  if is_android(bot_platform):
    asan_options['allow_user_segv_handler'] = 1
  # Check if UBSAN is enabled as well for this ASAN build.
  # If yes, set UBSAN_OPTIONS and enable suppressions.
  if get_value('UBSAN'):
    if disable_ubsan:
      ubsan_options = get_ubsan_disabled_options()
    else:
      ubsan_options = get_ubsan_options()
    # Remove |symbolize| explicitly to avoid overridding ASan defaults.
    ubsan_options.pop('symbolize', None)
    set_value('UBSAN_OPTIONS', join_memory_tool_options(ubsan_options))
  return asan_options
def get_cpu_arch():
  """Return the CPU architecture string for real Android devices, else None.

  Only physical Android devices report an architecture; emulators and
  desktop platforms currently return None.
  """
  if is_android() and not is_android_emulator():
    # FIXME: Handle this import in a cleaner way.
    from clusterfuzz._internal.platforms import android
    return android.settings.get_cpu_arch()
  # FIXME: Add support for desktop architectures as needed.
  return None


def get_current_memory_tool_var():
  """Get the environment variable name for the current job type's sanitizer.

  Returns e.g. 'ASAN_OPTIONS', or None if no tool could be determined.
  """
  memory_tool_name = get_memory_tool_name(get_value('JOB_NAME'))
  if not memory_tool_name:
    return None
  return memory_tool_name + '_OPTIONS'
def get_memory_tool_options(env_var, default_value=None):
  """Get the current memory tool options as a dict.

  Returns |default_value| when |env_var| is unset; otherwise a dict of the
  parsed memory tool options."""
  raw = get_value(env_var)
  if raw is None:
    return default_value
  return _parse_memory_tool_options(raw)


def get_instrumented_libraries_paths():
  """Get the instrumented libraries paths for the current sanitizer, or
  None if no tool/paths apply. MSAN paths depend on the origin-tracking
  flavor of the build."""
  tool = get_memory_tool_name(get_value('JOB_NAME'))
  if not tool:
    return None
  if tool == 'MSAN':
    # Pick the variant matching this build's origin tracking mode.
    if 'no-origins' in get_value('BUILD_URL', ''):
      tool += '_NO_ORIGINS'
    else:
      tool += '_CHAINED'
  raw_paths = get_value('INSTRUMENTED_LIBRARIES_PATHS_' + tool)
  if not raw_paths:
    return None
  return raw_paths.split(':')
def get_default_tool_path(tool_name):
  """Get the default tool for this platform (from scripts/ dir)."""
  # Android devices are symbolized on the (Linux) host machine, so use the
  # linux flavor of the tool there; otherwise use the native platform.
  platform_override = 'linux' if is_android() else None
  return os.path.join(
      get_platform_resources_directory(platform_override),
      get_executable_filename(tool_name))
def get_environment_settings_as_string():
  """Return environment settings as a string. Includes settings for memory
  debugging tools (e.g. ASAN_OPTIONS for ASAN), application binary revision,
  application command line, etc.

  On Android the string is assembled from device state (fingerprint, patch
  level, property/options files read over adb); on desktop it lists the
  *_OPTIONS environment variables.
  """
  environment_string = ''
  # Add Android specific variables.
  if is_android():
    # FIXME: Handle this import in a cleaner way.
    from clusterfuzz._internal.platforms import android
    # Prefer explicit env overrides over values queried from the device.
    build_fingerprint = get_value(
        'BUILD_FINGERPRINT') or android.settings.get_build_fingerprint()
    environment_string += '[Environment] Build fingerprint: %s\n' % (
        build_fingerprint)
    security_patch_level = get_value(
        'SECURITY_PATCH_LEVEL') or android.settings.get_security_patch_level()
    environment_string += (
        '[Environment] Patch level: %s\n' % security_patch_level)
    environment_string += (
        '[Environment] Local properties file "%s" with contents:\n%s\n' %
        (android.device.LOCAL_PROP_PATH,
         android.adb.read_data_from_file(android.device.LOCAL_PROP_PATH)))
    command_line = get_value('COMMAND_LINE_PATH')
    if command_line:
      environment_string += (
          '[Environment] Command line file "%s" with contents:\n%s\n' %
          (command_line, android.adb.read_data_from_file(command_line)))
    asan_options = get_value('ASAN_OPTIONS')
    if asan_options:
      # FIXME: Need better documentation for Chrome builds. Chrome builds use
      # asan_device_setup.sh and we send this options file path as an include
      # to extra-options parameter.
      sanitizer_options_file_path = (
          android.sanitizer.get_options_file_path('ASAN'))
      environment_string += (
          '[Environment] ASAN options file "%s" with contents:\n%s\n' %
          (sanitizer_options_file_path, asan_options))
  else:
    # For desktop platforms, add |*_OPTIONS| variables from environment.
    for sanitizer_option in get_sanitizer_options_for_display():
      environment_string += '[Environment] %s\n' % sanitizer_option
  return environment_string
def get_sanitizer_options_for_display():
  """Return a list of 'VAR=value' strings for every supported sanitizer's
  *_OPTIONS variable that is set in the environment."""
  displayed = []
  for tool in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
    variable = tool + '_OPTIONS'
    value = os.getenv(variable)
    # Unset or empty variables are not worth displaying.
    if value:
      displayed.append('%s=%s' % (variable, value))
  return displayed
def get_llvm_symbolizer_path():
  """Get the path of the llvm-symbolizer binary.

  Prefers the binary pointed to by LLVM_SYMBOLIZER_PATH (typically from the
  build archive) if it exists and actually runs; otherwise falls back to
  the bundled per-platform default. Returns None if no working binary is
  found. Also chmods the chosen binary to be executable.
  """
  llvm_symbolizer_path = get_value('LLVM_SYMBOLIZER_PATH')
  if llvm_symbolizer_path and os.path.exists(llvm_symbolizer_path):
    # Make sure that llvm symbolizer binary is executable.
    os.chmod(llvm_symbolizer_path, 0o750)
    # Smoke-test the binary; archives sometimes ship broken symbolizers.
    return_code = subprocess.call(
        [llvm_symbolizer_path, '--help'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL)
    if return_code == 0:
      # llvm-symbolize works, return it.
      return llvm_symbolizer_path
  # Either
  # 1. llvm-symbolizer was not found in build archive. OR
  # 2. llvm-symbolizer fails due to dependency issue, clang regression, etc.
  # So, use our own version of llvm-symbolizer.
  llvm_symbolizer_path = get_default_tool_path('llvm-symbolizer')
  # Make sure that we have a default llvm-symbolizer for this platform.
  if not os.path.exists(llvm_symbolizer_path):
    return None
  # Make sure that llvm symbolizer binary is executable.
  os.chmod(llvm_symbolizer_path, 0o750)
  return llvm_symbolizer_path
def get_root_directory():
  """Return the ClusterFuzz root directory (ROOT_DIR)."""
  return get_value('ROOT_DIR')


def get_startup_scripts_directory():
  """Return the path to the bot startup scripts."""
  return os.path.join(get_value('ROOT_DIR'), 'src', 'python', 'bot', 'startup')


def get_config_directory():
  """Return the path to the configs directory, honoring
  CONFIG_DIR_OVERRIDE."""
  override = get_value('CONFIG_DIR_OVERRIDE')
  if override:
    return override
  # On App Engine the working directory is already src/appengine.
  if is_running_on_app_engine():
    return 'config'
  # On a bot, point at the config folder inside the appengine dir.
  return os.path.join(get_root_directory(), 'src', 'appengine', 'config')


def get_gae_config_directory():
  """Return the path to the Google App Engine configs directory."""
  return os.path.join(get_config_directory(), 'gae')


def get_gce_config_directory():
  """Return the path to the Google Compute Engine configs directory."""
  return os.path.join(get_config_directory(), 'gce')


def get_resources_directory():
  """Return the path to the resources directory."""
  return os.path.join(get_root_directory(), 'resources')


def get_platform_resources_directory(platform_override=None):
  """Return the path to the platform-specific resources directory."""
  name = platform_override or platform()
  # All Android flavors share the same android resources directory.
  if is_android(name):
    name = 'ANDROID'
  return os.path.join(get_resources_directory(), 'platform', name.lower())


def get_suppressions_directory():
  """Return the path to the suppressions directory."""
  return os.path.join(get_config_directory(), 'suppressions')
def get_suppressions_file(sanitizer, suffix='suppressions'):
  """Return the path to the sanitizer suppressions file, or None when the
  file is missing or empty."""
  file_name = '{sanitizer}_{suffix}.txt'.format(
      sanitizer=sanitizer, suffix=suffix)
  path = os.path.join(get_suppressions_directory(), file_name)
  # An absent or zero-byte suppressions file is treated as "no suppressions".
  if not os.path.exists(path) or not os.path.getsize(path):
    return None
  return path
def get_lsan_options():
  """Generates default LSAN options."""
  options = dict(COMMON_SANITIZER_OPTIONS)
  options['print_suppressions'] = 0
  suppressions = get_suppressions_file('lsan')
  if suppressions:
    options['suppressions'] = suppressions
  return options


def get_kasan_options():
  """Generates default KASAN options."""
  # Kernel sanitizer output is symbolized elsewhere.
  options = dict(COMMON_SANITIZER_OPTIONS)
  options['symbolize'] = 0
  return options


def get_msan_options():
  """Generates default MSAN options."""
  options = dict(COMMON_SANITIZER_OPTIONS)
  options['symbolize'] = 0
  return options
def get_platform_id():
  """Return a platform id as a lowercase string.

  For physical Android devices this incorporates device-specific info from
  android.settings (overridable via PLATFORM_ID); cuttlefish/emulator and
  desktop platforms just use the lowercased platform name.
  """
  bot_platform = platform()
  if is_android_cuttlefish() or is_android_emulator():
    return bot_platform.lower()
  if is_android(bot_platform):
    # FIXME: Handle this import in a cleaner way.
    from clusterfuzz._internal.platforms import android
    platform_id = get_value('PLATFORM_ID', android.settings.get_platform_id())
    return platform_id.lower()
  return bot_platform.lower()


def get_platform_group():
  """Return the platform group (specified via QUEUE_OVERRIDE) if it
  exists, otherwise platform()."""
  platform_group = get_value('QUEUE_OVERRIDE')
  if platform_group:
    return platform_group
  return platform()
def get_memory_tool_name(job_name):
  """Figure out the name of the memory debugging tool used by |job_name|."""
  for candidate in SUPPORTED_MEMORY_TOOLS_FOR_OPTIONS:
    if tool_matches(candidate, job_name):
      return candidate
  # No tool mentioned in the job name: assume ASAN. This also covers LSAN
  # job types.
  return 'ASAN'


def get_memory_tool_display_string(job_name):
  """Return a human-readable memory tool string for a testcase."""
  tool = get_memory_tool_name(job_name)
  sanitizer = SANITIZER_NAME_MAP.get(tool)
  if sanitizer:
    return 'Sanitizer: %s (%s)' % (sanitizer, tool)
  return 'Memory Tool: %s' % tool


def get_executable_filename(executable_name):
  """Return the platform-appropriate filename for the given executable
  (appends '.exe' on Windows when missing)."""
  if platform() != 'WINDOWS':
    return executable_name
  suffix = '.exe'
  if executable_name.endswith(suffix):
    return executable_name
  return executable_name + suffix
def get_tsan_options():
  """Generates default TSAN options."""
  suppressions = get_suppressions_file('tsan')
  options = {
      'atexit_sleep_ms': 200,
      'flush_memory_ms': 2000,
      'history_size': 3,
      'print_suppressions': 0,
      'report_thread_leaks': 0,
      'report_signal_unsafe': 0,
      'stack_trace_format': 'DEFAULT',
      'symbolize': 1,
  }
  # Add common sanitizer options.
  options.update(COMMON_SANITIZER_OPTIONS)
  if suppressions:
    options['suppressions'] = suppressions
  return options


def get_ubsan_options():
  """Generates default UBSAN options."""
  # Note that UBSAN can work together with ASAN as well.
  suppressions = get_suppressions_file('ubsan')
  options = {
      'halt_on_error': 1,
      'print_stacktrace': 1,
      'print_suppressions': 0,
      # We use -fsanitize=unsigned-integer-overflow as an additional coverage
      # signal and do not want those errors to be reported by UBSan as bugs.
      # See https://github.com/google/oss-fuzz/issues/910 for additional info.
      'silence_unsigned_overflow': 1,
      'symbolize': 1,
  }
  # Add common sanitizer options.
  options.update(COMMON_SANITIZER_OPTIONS)
  # TODO(crbug.com/877070): Make this code configurable on a per job basis.
  if suppressions and not is_chromeos_system_job():
    options['suppressions'] = suppressions
  return options
def get_ubsan_disabled_options():
  """Return UBSAN options that effectively disable UBSAN reporting."""
  return {
      'halt_on_error': 0,
      'print_stacktrace': 0,
      'print_suppressions': 0,
  }


def get_value_string(environment_variable, default_value=None):
  """Get an environment variable's raw string value (no literal eval)."""
  return os.getenv(environment_variable, default_value)
def get_value(environment_variable, default_value=None):
"""Return an environment variable value."""
value_string = os.getenv(environment_variable)
# value_string will be None if the variable is not defined.
if value_string is None:
return default_value
# Exception for ANDROID_SERIAL. Sometimes serial can be just numbers,
# so we don't want to it eval it.
if environment_variable == 'ANDROID_SERIAL':
return value_string
# Evaluate the value of the environment variable with string fallback.
return _eval_value(value_string)
def _job_substring_match(search_string, job_name):
"""Return a bool on whether a string exists in a provided job name or
use from environment if available (case insensitive)."""
job_name = job_name or get_value('JOB_NAME')
if not job_name:
return False
return search_string in job_name.lower()
def is_afl_job(job_name=None):
  """Return True if the current job uses AFL."""
  return get_engine_for_job(job_name) == 'afl'


def is_ios_job(job_name=None):
  """Return True if the current job is for iOS."""
  return _job_substring_match('ios_', job_name)


def is_chromeos_job(job_name=None):
  """Return True if the current job is for ChromeOS."""
  return _job_substring_match('chromeos', job_name)


def is_lkl_job(job_name=None):
  """Return True if the current job is an LKL job."""
  return _job_substring_match('lkl', job_name)


def is_chromeos_system_job(job_name=None):
  """Return True if the current job is for ChromeOS system (i.e. not libFuzzer
  or entire Chrome browser for Chrome on ChromeOS)."""
  return is_chromeos_job(job_name) and get_value('CHROMEOS_SYSTEM')


def is_libfuzzer_job(job_name=None):
  """Return True if the current job uses libFuzzer."""
  return get_engine_for_job(job_name) == 'libFuzzer'


def is_honggfuzz_job(job_name=None):
  """Return True if the current job uses honggfuzz."""
  return get_engine_for_job(job_name) == 'honggfuzz'


def is_kernel_fuzzer_job(job_name=None):
  """Return True if the current job uses syzkaller."""
  return get_engine_for_job(job_name) == 'syzkaller'


def is_engine_fuzzer_job(job_name=None):
  """Return True if this job uses any fuzzing engine at all."""
  return bool(get_engine_for_job(job_name))


def get_engine_for_job(job_name=None):
  """Return the fuzzing engine name embedded in |job_name| (or JOB_NAME
  from the environment), or None when no known engine matches."""
  effective_name = job_name or get_value('JOB_NAME')
  for engine in fuzzing.ENGINES:
    if engine.lower() in effective_name:
      return engine
  return None
def is_posix():
  """Return True on POSIX platforms (Linux/Unix and macOS)."""
  return os.name == 'posix'


def is_trusted_host(ensure_connected=True):
  """Return whether the current bot is a trusted host. With
  |ensure_connected| a worker bot must also be attached."""
  trusted = get_value('TRUSTED_HOST')
  connection_ok = not ensure_connected or get_value('WORKER_BOT_NAME')
  return trusted and connection_ok


def is_untrusted_worker():
  """Return whether the current bot is an untrusted worker."""
  return get_value('UNTRUSTED_WORKER')


def is_running_on_app_engine():
  """Return True when running on App Engine (local dev or production)."""
  return (os.getenv('GAE_ENV') or is_running_on_app_engine_development() or
          os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/'))


def is_running_on_app_engine_development():
  """Return True when running on the local development App Engine server."""
  server_software = os.getenv('SERVER_SOFTWARE', '')
  return (os.getenv('GAE_ENV') == 'dev' or
          server_software.startswith('Development/'))
def parse_environment_definition(environment_string):
  """Parse a job's environment definition into a {key: value} dict.

  Each non-empty, non-comment line must look like 'KEY = value'; keys and
  values are stripped of surrounding whitespace. Empty or falsy input
  yields an empty dict.
  """
  if not environment_string:
    return {}
  values = {}
  # Previously the line list was wrapped in a redundant single-element outer
  # list and walked with a double loop; iterate the lines directly.
  for line in environment_string.splitlines():
    if line.startswith('#') or not line.strip():
      continue
    m = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
    if m:
      values[m.group(1).strip()] = m.group(2).strip()
  return values
def platform():
  """Return the operating system type ('WINDOWS'/'LINUX'/'MAC'), unless an
  OS_OVERRIDE is provided. Raises ValueError for unsupported platforms."""
  override = get_value('OS_OVERRIDE')
  if override:
    return override.upper()
  current = sys.platform
  if current.startswith('win'):
    return 'WINDOWS'
  if current.startswith('linux'):
    return 'LINUX'
  if current == 'darwin':
    return 'MAC'
  raise ValueError('Unsupported platform "%s".' % sys.platform)
def remove_key(key_name):
  """Delete |key_name| from the environment if present; a falsy or missing
  key is a no-op."""
  if key_name:
    os.environ.pop(key_name, None)
# Used by reset_environment to store the initial environment. None until the
# first reset_environment() call snapshots os.environ.
_initial_environment = None


def reset_environment():
  """Resets environment variables to their initial state. Saves the initial
  state on first call.

  On a trusted host the reset is also forwarded to the connected untrusted
  worker.
  """
  global _initial_environment
  if _initial_environment is None:
    _initial_environment = copy()
    # There is nothing to reset if we are initializing for the first time.
  else:
    # Clean current environment.
    os.environ.clear()
    # Add shared variables with values from _initial_environment.
    os.environ.update(_initial_environment)
  if is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import \
        environment as untrusted_env
    untrusted_env.reset_environment()
def set_common_environment_variables():
  """Sets environment variables common for different memory debugging
  tools."""
  # G_SLICE = always-malloc: make glib use system malloc.
  set_value('G_SLICE', 'always-malloc')
  # NSS_DISABLE_UNLOAD = 1: make nss skip dlclosing dynamically loaded
  # modules, which would result in "obj:*" in backtraces.
  set_value('NSS_DISABLE_UNLOAD', 1)
  # NSS_DISABLE_ARENA_FREE_LIST = 1: make nss use system malloc.
  set_value('NSS_DISABLE_ARENA_FREE_LIST', 1)
  set_value('NACL_DANGEROUS_SKIP_QUALIFICATION_TEST', 1)


def set_memory_tool_options(env_var, options_dict):
  """Serialize |options_dict| and store it in the |env_var| environment
  variable."""
  set_value(env_var, join_memory_tool_options(options_dict))
def set_environment_parameters_from_file(file_path):
  """Set environment variables from 'KEY = value' lines in |file_path|;
  silently does nothing when the file does not exist."""
  if not os.path.exists(file_path):
    return
  with open(file_path, 'r') as handle:
    contents = handle.read()
  for line in contents.splitlines():
    # Skip comments and blank lines.
    if line.startswith('#') or not line.strip():
      continue
    match = re.match('([^ =]+)[ ]*=[ ]*(.*)', line)
    if match:
      set_value(match.group(1), match.group(2))
def update_symbolizer_options(tool_options, symbolize_inline_frames=False):
  """Fill in missing symbolizer options (`external_symbolizer_path` and
  `symbolize_inline_frames`) in |tool_options| in place; values already
  present are left untouched."""
  if 'external_symbolizer_path' not in tool_options:
    symbolizer = get_llvm_symbolizer_path()
    if symbolizer:
      tool_options['external_symbolizer_path'] = (
          _quote_value_if_needed(symbolizer))
  if 'symbolize_inline_frames' not in tool_options:
    # Sanitizers expect a lowercase 'true'/'false' string.
    tool_options['symbolize_inline_frames'] = (
        str(symbolize_inline_frames).lower())
def reset_current_memory_tool_options(redzone_size=0,
                                      malloc_context_size=0,
                                      leaks=True,
                                      symbolize_inline_frames=False,
                                      quarantine_size_mb=None,
                                      disable_ubsan=False):
  """Resets environment variables for memory debugging tool to default
  values.

  Determines the tool from JOB_NAME, builds its default option dict, merges
  ADDITIONAL_<TOOL>_OPTIONS overrides, and writes the joined result to
  <TOOL>_OPTIONS (plus MEMORY_TOOL, and UBSAN_OPTIONS for CFI). On Android
  (non-engine jobs) the options are also pushed to the device shell.
  """
  # FIXME: Handle these imports in a cleaner way.
  from clusterfuzz._internal.platforms import android
  # Set common environment variable useful for memory debugging tools.
  set_common_environment_variables()
  # Set memory tool name in our environment for easy access.
  job_name = get_value('JOB_NAME')
  tool_name = get_memory_tool_name(job_name)
  set_value('MEMORY_TOOL', tool_name)
  bot_platform = platform()
  # Default options for the memory debugging tool used. get_memory_tool_name
  # always returns one of the names below, so tool_options is always bound.
  if tool_name in ['ASAN', 'HWASAN']:
    tool_options = get_asan_options(redzone_size, malloc_context_size,
                                    quarantine_size_mb, bot_platform, leaks,
                                    disable_ubsan)
  elif tool_name == 'KASAN':
    tool_options = get_kasan_options()
  elif tool_name == 'MSAN':
    tool_options = get_msan_options()
  elif tool_name == 'TSAN':
    tool_options = get_tsan_options()
  elif tool_name in ['UBSAN', 'CFI']:
    tool_options = get_ubsan_options()
  # Additional options. These override the defaults.
  additional_tool_options = get_value('ADDITIONAL_%s_OPTIONS' % tool_name)
  if additional_tool_options:
    tool_options.update(_parse_memory_tool_options(additional_tool_options))
  if tool_options.get('symbolize') == 1:
    update_symbolizer_options(
        tool_options, symbolize_inline_frames=symbolize_inline_frames)
  # Join the options.
  joined_tool_options = join_memory_tool_options(tool_options)
  tool_options_variable_name = '%s_OPTIONS' % tool_name
  set_value(tool_options_variable_name, joined_tool_options)
  # CFI handles various signals through the UBSan runtime, so need to set
  # UBSAN_OPTIONS explicitly. See crbug.com/716235#c25
  if tool_name == 'CFI':
    set_value('UBSAN_OPTIONS', joined_tool_options)
  # For Android, we need to set shell property |asan.options|.
  # For engine-based fuzzers, it is not needed as options variable is directly
  # passed to shell.
  if is_android(bot_platform) and not is_engine_fuzzer_job():
    android.sanitizer.set_options(tool_name, joined_tool_options)
def set_default_vars():
  """Set default environment vars and values from bot/env.yaml under
  ROOT_DIR.

  Values are written to os.environ directly (stringified) rather than via
  set_value, since set_value may forward to an untrusted worker which is
  not desired during bootstrap.
  """
  env_file_path = os.path.join(get_value('ROOT_DIR'), 'bot', 'env.yaml')
  with open(env_file_path) as file_handle:
    env_file_contents = file_handle.read()
  env_vars_and_values = yaml.safe_load(env_file_contents)
  for variable, value in six.iteritems(env_vars_and_values):
    # We cannot call set_value here.
    os.environ[variable] = str(value)
def set_bot_environment():
  """Set environment for the bots.

  Establishes all standard bot directories under ROOT_DIR/bot, the bot
  name, temp directories, and then layers on defaults from env.yaml and
  the project config. Returns True on success, False when ROOT_DIR is not
  set.
  """
  root_dir = get_value('ROOT_DIR')
  if not root_dir:
    # Error, bail out.
    return False
  # Reset our current working directory. Our's last job might
  # have left us in a non-existent temp directory.
  # Or ROOT_DIR might be deleted and recreated.
  os.chdir(root_dir)
  # Set some default directories. These can be overriden by config files below.
  bot_dir = os.path.join(root_dir, 'bot')
  if is_trusted_host(ensure_connected=False):
    # Trusted hosts keep builds under the worker's root, not their own.
    worker_root_dir = os.environ['WORKER_ROOT_DIR']
    os.environ['BUILDS_DIR'] = os.path.join(worker_root_dir, 'bot', 'builds')
  else:
    os.environ['BUILDS_DIR'] = os.path.join(bot_dir, 'builds')
  os.environ['BUILD_URLS_DIR'] = os.path.join(bot_dir, 'build-urls')
  os.environ['LOG_DIR'] = os.path.join(bot_dir, 'logs')
  os.environ['CACHE_DIR'] = os.path.join(bot_dir, 'cache')
  inputs_dir = os.path.join(bot_dir, 'inputs')
  os.environ['INPUT_DIR'] = inputs_dir
  os.environ['CRASH_STACKTRACES_DIR'] = os.path.join(inputs_dir, 'crash-stacks')
  os.environ['FUZZERS_DIR'] = os.path.join(inputs_dir, 'fuzzers')
  os.environ['DATA_BUNDLES_DIR'] = os.path.join(inputs_dir, 'data-bundles')
  os.environ['FUZZ_INPUTS'] = os.path.join(inputs_dir, 'fuzzer-testcases')
  # FUZZ_INPUTS_MEMORY aliases FUZZ_INPUTS by default (no tmpfs assumed here).
  os.environ['FUZZ_INPUTS_MEMORY'] = os.environ['FUZZ_INPUTS']
  os.environ['FUZZ_INPUTS_DISK'] = os.path.join(inputs_dir,
                                                'fuzzer-testcases-disk')
  os.environ['MUTATOR_PLUGINS_DIR'] = os.path.join(inputs_dir,
                                                   'mutator-plugins')
  os.environ['FUZZ_DATA'] = os.path.join(inputs_dir,
                                         'fuzzer-common-data-bundles')
  os.environ['IMAGES_DIR'] = os.path.join(inputs_dir, 'images')
  os.environ['SYMBOLS_DIR'] = os.path.join(inputs_dir, 'symbols')
  os.environ['USER_PROFILE_ROOT_DIR'] = os.path.join(inputs_dir,
                                                     'user-profile-dirs')
  # Set bot name.
  if not get_value('BOT_NAME'):
    # If not defined, default to host name.
    os.environ['BOT_NAME'] = socket.gethostname().lower()
  # Local temp directory (non-tmpfs).
  local_tmp_dir = os.path.join(bot_dir, 'tmp')
  # Set BOT_TMPDIR if not already set.
  if not get_value('BOT_TMPDIR'):
    os.environ['BOT_TMPDIR'] = local_tmp_dir
  # Add common environment variables needed by Bazel test runner.
  # See https://docs.bazel.build/versions/master/test-encyclopedia.html.
  # NOTE: Do not use a tmpfs folder as some fuzz targets don't work.
  os.environ['TEST_TMPDIR'] = local_tmp_dir
  os.environ['TZ'] = 'UTC'
  # Sets the default configuration. Can be overridden by job environment.
  set_default_vars()
  # Set environment variable from local project configuration.
  from clusterfuzz._internal.config import local_config
  local_config.ProjectConfig().set_environment()
  # Success.
  return True
def set_tsan_max_history_size():
  """Bump any history_size=N (N < 7) in TSAN_OPTIONS up to the maximum of
  7; no-op when TSAN_OPTIONS is unset."""
  tsan_options = get_value('TSAN_OPTIONS')
  if not tsan_options:
    return
  max_history_size = 7
  for size in range(max_history_size):
    tsan_options = tsan_options.replace('history_size=%d' % size,
                                        'history_size=%d' % max_history_size)
  set_value('TSAN_OPTIONS', tsan_options)
def set_value(environment_variable, value):
  """Set an environment variable.

  Both name and value are stringified; '%ROOT_DIR%' placeholders in the
  value are expanded. On a trusted host the assignment is also forwarded
  to the connected untrusted worker.
  """
  value_str = str(value)
  environment_variable_str = str(environment_variable)
  value_str = value_str.replace('%ROOT_DIR%', os.getenv('ROOT_DIR', ''))
  os.environ[environment_variable_str] = value_str
  if is_trusted_host():
    from clusterfuzz._internal.bot.untrusted_runner import \
        environment as untrusted_env
    untrusted_env.forward_environment_variable(environment_variable_str,
                                               value_str)
def tool_matches(tool_name, job_name):
  """Return whether |job_name| references the given memory debugging tool.

  The tool name must appear at the start of the job name or directly after
  a non-letter, so e.g. 'asan' does not match inside 'msan'."""
  pattern = '(.*[^a-zA-Z]|^)%s' % tool_name.lower()
  return bool(re.match(pattern, job_name.lower()))
def appengine_noop(func):
  """Decorator: make |func| return None without running when on App
  Engine."""

  @functools.wraps(func)
  def guarded(*args, **kwargs):
    if is_running_on_app_engine():
      return None
    return func(*args, **kwargs)

  return guarded


def bot_noop(func):
  """Decorator: make |func| return None without running when on a bot
  (i.e. not on App Engine)."""

  @functools.wraps(func)
  def guarded(*args, **kwargs):
    if not is_running_on_app_engine():
      return None
    return func(*args, **kwargs)

  return guarded
def is_local_development():
"""Return true if running in local development environment (e.g. running
a bot locally, excludes tests)."""
return bool(get_value('LOCAL_DEVELOPMENT') and not get_value('PY_UNITTESTS'))
def local_noop(func):
  """Wrap a function into no-op and return None if running in local
  development environment."""
  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    # Skip work both for local bot runs and the App Engine dev server.
    local = is_local_development() or is_running_on_app_engine_development()
    if not local:
      return func(*args, **kwargs)
    return None
  return wrapper
def is_ephemeral():
  """Return whether or not we are an ephemeral bot."""
  # Returns the raw (truthy/falsy) value of the EPHEMERAL environment
  # variable rather than coercing it to bool.
  return get_value('EPHEMERAL')
def is_android(plt=None):
  """Return true if we are on android platform."""
  current_platform = plt or platform()
  # Substring match so variants like ANDROID_X86 also count as Android.
  return 'ANDROID' in current_platform
def is_android_cuttlefish(plt=None):
  """Return true if we are on android cuttlefish platform."""
  # NOTE(review): cuttlefish is detected via ANDROID_X86 — presumably because
  # cuttlefish devices run x86 Android; confirm against platform() values.
  current_platform = plt or platform()
  return 'ANDROID_X86' in current_platform
def is_android_emulator(plt=None):
  """Return true if we are on android emulator platform."""
  # Exact equality here (not substring) — only the emulator platform counts.
  current_platform = plt or platform()
  return current_platform == 'ANDROID_EMULATOR'
def is_android_kernel(plt=None):
  """Return true if we are on android kernel platform groups."""
  # Unlike the other is_android_* helpers, this checks the platform *group*.
  group = plt or get_platform_group()
  return 'ANDROID_KERNEL' in group
def is_lib():
  """Whether or not we're in libClusterFuzz."""
  # Returns the raw (truthy/falsy) value of the LIB_CF environment variable.
  return get_value('LIB_CF')
| |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import sys
import tempfile
from bigdl.nn.criterion import Criterion
from bigdl.nn.layer import Layer
from bigdl.optim.optimizer import MaxEpoch, EveryEpoch
from bigdl.util.common import to_list, JavaValue
from zoo.common.utils import callZooFunc
from zoo.feature.common import FeatureSet
from zoo.pipeline.api.keras.engine.topology import to_bigdl_metric, Loss, OptimMethod
from zoo.pipeline.api.net.utils import find_placeholders, to_bigdl_optim_method, find_tensors
from zoo.pipeline.estimator import Estimator
from zoo.util import nest
from zoo.util.triggers import EveryEpoch as ZEveryEpoch
from zoo.util.triggers import ZooTrigger
from zoo.tfpark.tf_dataset import TFNdarrayDataset, check_data_compatible
from zoo.tfpark.tf_dataset import _standarize_feature_label_dataset
# Python 2/3 compatibility: on Python 3 the py2-only numeric/text types are
# aliased to their builtin equivalents. Use sys.version_info instead of a
# lexicographic comparison on the sys.version string, which is fragile
# (e.g. a hypothetical "10.x" would compare as less than '3').
if sys.version_info[0] >= 3:
    long = int
    unicode = str
class IdentityCriterion(Criterion):
    """No-op criterion passed to BigDL; the actual loss value is already
    computed inside the exported TensorFlow graph, so the JVM side only
    needs an identity pass-through."""
    def __init__(self):
        super(IdentityCriterion, self).__init__(None, "float")
class TFValidationMethod(JavaValue):
    """JVM-backed wrapper evaluating a BigDL `val_method` on a stateful
    metric. `output_indices` and `label_indices` are positions of this
    metric's prediction and label tensors within the flattened list of
    exported graph outputs (see TFModel._process_metrics)."""
    def __init__(self, val_method, name, output_indices, label_indices):
        self.name = name
        self.val_method = val_method
        JavaValue.__init__(self, None, "float",
                           val_method, name, output_indices, label_indices)
class StatelessMetric(JavaValue):
    """JVM-backed metric computed from a single scalar tensor.
    `idx` is the position of the metric tensor in the exported outputs;
    `count_idx` is the position of the record-count tensor (the batch-size
    tensor is placed at index 0 by TFModel._process_metrics)."""
    def __init__(self, metric_name, idx, count_idx):
        self.name = metric_name
        self.idx = idx
        self.count_idx = count_idx
        JavaValue.__init__(self, None, "float", metric_name, idx, count_idx)
class BigDLMetric(object):
    """Plain value holder pairing a BigDL validation method with the
    TensorFlow output and label tensors it should be evaluated on."""
    def __init__(self, val_method, outputs, labels):
        self.val_method, self.outputs, self.labels = val_method, outputs, labels
class TFTrainingHelper(Layer):
    """BigDL Layer wrapping a TensorFlow training graph exported to disk.
    `path` is the export directory produced by TFModel._save_to_dir;
    `saver`, `meta` and `sess` keep enough TensorFlow state on the Python
    side to copy trained weights back into the local session."""
    def __init__(self, path, config_proto, saver, meta, sess):
        self.saver = saver
        self.meta = meta
        self.export_dir = path
        self.sess = sess
        if config_proto is not None:
            import tensorflow as tf
            assert isinstance(config_proto, tf.ConfigProto), \
                "session_config should be a tf.ConfigProto"
            config_proto.use_per_session_threads = True
            # The JVM side receives the session config as serialized bytes.
            byte_arr = bytearray(config_proto.SerializeToString())
        else:
            byte_arr = None
        super(TFTrainingHelper, self).__init__(None, "float", path, byte_arr)
    def save_checkpoint(self):
        """Ask the JVM side to write a checkpoint of the current weights."""
        callZooFunc(self.bigdl_type, "saveCheckpoint",
                    self.value)
    def get_weights_to_python(self):
        """Sync trained weights back from the JVM: checkpoint them, then
        restore the checkpoint into the local TensorFlow session."""
        self.save_checkpoint()
        self.saver.restore(self.sess, os.path.join(self.export_dir, "model"))
    def load_checkpoint(self, path):
        """Load a Zoo checkpoint on the JVM side and mirror it locally."""
        callZooFunc(self.bigdl_type, "loadZooCheckpoint", self.value, path)
        self.get_weights_to_python()
def _to_operation_name(name):
return name.split(":")[0]
def _to_floats(vs):
return [float(v) for v in vs]
class TFModel(object):
    """Bundles the exported BigDL training helper layer, its (identity)
    criterion and the BigDL validation methods that were derived from a
    TensorFlow graph. Built via TFModel.create / TFModel.export."""
    def __init__(self, training_helper_layer, criterion, val_methods):
        self.training_helper_layer = training_helper_layer
        self.criterion = criterion
        self.val_methods = val_methods
    @staticmethod
    def _expand_inputs(inputs, tensors_with_value, loss):
        """Flatten `inputs` and split `tensors_with_value` into extra input
        tensors plus their default feed values. Raises ValueError if a
        tensor appears both as a regular input and in `tensors_with_value`.
        (`loss` is currently unused.)"""
        additional_inputs = []
        additional_values = []
        inputs = nest.flatten(inputs)
        names = set([i.name for i in inputs])
        if tensors_with_value:
            for t, v in tensors_with_value.items():
                if t.name in names:
                    msg = f"tensor {t} already in inputs, cannot put it in tensor_with_value"
                    raise ValueError(msg)
                additional_inputs.append(t)
                additional_values.append(v)
        return inputs, additional_inputs, additional_values
    @staticmethod
    def _process_session_config(session_config):
        """Validate the tf.ConfigProto and force per-session threads."""
        import tensorflow as tf
        if session_config is not None:
            assert isinstance(session_config, tf.ConfigProto), \
                "session_config should be a tf.ConfigProto"
            session_config.use_per_session_threads = True
        return session_config
    @staticmethod
    def _process_grads(graph, grads):
        """Normalize each gradient tensor inside `graph` via
        zoo.util.tf.process_grad."""
        with graph.as_default():
            from zoo.util.tf import process_grad
            grads = [process_grad(grad) for grad in grads]
        return grads
    @staticmethod
    def _process_metrics(graph, metrics, real_batch_size):
        """Turn a name->metric dict into (outputs, val_methods).
        Index 0 of `outputs` is always the batch-size tensor; `idx` tracks
        where each metric's tensors land in the flattened outputs list so
        the JVM-side methods can find them by position."""
        import tensorflow as tf
        outputs = [real_batch_size]
        val_methods = None
        if metrics is not None:
            idx = 1
            val_methods = []
            for metric_name in metrics:
                metric = metrics[metric_name]
                if tf.is_numeric_tensor(metric):
                    # Plain scalar tensor -> stateless metric at position idx.
                    outputs.append(metric)
                    val_methods.append(StatelessMetric(metric_name, idx, 0))
                    idx += 1
                else:
                    # BigDLMetric: append its output tensors, then identity
                    # copies of its label tensors, and record both ranges.
                    outputs += metric.outputs
                    with graph.as_default():
                        val_labels = [tf.identity(v) for v in metric.labels]
                    outputs += val_labels
                    method = TFValidationMethod(metric.val_method,
                                                metric_name,
                                                list(range(idx, idx + len(metric.outputs))),
                                                list(range(idx + len(metric.outputs),
                                                           idx + len(metric.outputs)
                                                           + len(val_labels))))
                    val_methods.append(method)
                    idx += len(metric.outputs) + len(val_labels)
        outputs = [tf.to_float(output) for output in outputs]
        return outputs, val_methods
    @staticmethod
    def _process_variables(graph, variables, updates):
        """Build float views and assign ops for every global variable.
        Variables in `variables` (the trainables) keep their original order
        via `name2idx`; all other globals go into the `extra_*` lists."""
        import tensorflow as tf
        all_trainable_variables = variables
        name2idx = dict([(v.name, idx) for idx, v in enumerate(all_trainable_variables)])
        all_variables = graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        update_ops = graph.get_collection(tf.GraphKeys.UPDATE_OPS)
        if updates is not None:
            update_ops += updates
        trainable_variables = [0] * len(all_trainable_variables)
        trainable_assigns = [0] * len(all_trainable_variables)
        trainable_variable_placeholders = [0] * len(all_trainable_variables)
        extra_variables = []
        extra_variable_assigns = []
        extra_variable_assign_placeholders = []
        for v in all_variables:
            p = tf.placeholder(dtype=v.dtype, shape=v.shape)
            a = tf.assign(v, p)
            # special treatment for ResourceVariable
            if v.op.type == "VarHandleOp":
                v_float_value = tf.to_float(v.read_value())
            else:
                v_float_value = tf.to_float(v)
            if v.name in name2idx:
                trainable_variables[name2idx[v.name]] = v_float_value
                trainable_assigns[name2idx[v.name]] = a
                trainable_variable_placeholders[name2idx[v.name]] = p
            else:
                extra_variables.append(v_float_value)
                extra_variable_assigns.append(a)
                extra_variable_assign_placeholders.append(p)
        extra_variable_assign = tf.group(*extra_variable_assigns)
        trainable_assign = tf.group(*trainable_assigns)
        update_op = tf.group(update_ops)
        return trainable_variables, trainable_variable_placeholders, trainable_assign, \
            extra_variables, extra_variable_assign_placeholders, \
            extra_variable_assign, update_op
    @staticmethod
    def _save_to_dir(folder, sess, graph,
                     metric_tensors,
                     batch_size_tensor,
                     loss_tensor, inputs, labels, predictions,
                     trainable_variables,
                     trainable_variable_placeholders,
                     trainable_assign,
                     extra_variables,
                     extra_variable_assign_placeholders,
                     extra_variable_assign,
                     grads, update_op, train_op,
                     additional_inputs,
                     additional_values):
        """Checkpoint the session into `folder` and write two artifacts:
        training_meta.json (tensor/op names the JVM side needs to drive the
        graph) and model.meta (the serialized GraphDef)."""
        import tensorflow as tf
        from tensorflow import gfile
        saver = tf.train.Saver()
        if not os.path.isdir(folder):
            os.makedirs(folder)
        saver.save(sess, os.path.join(folder, "model"), write_meta_graph=False)
        # Every entry below is a tensor/op *name*: the JVM side re-resolves
        # them against the imported graph.
        meta = {
            "inputs": [i.name for i in inputs],
            "input_types": [i.dtype.as_datatype_enum for i in inputs],
            "additional_inputs": [i.name for i in additional_inputs],
            "additional_input_types": [i.dtype.as_datatype_enum for i in additional_inputs],
            "labels": [l.name for l in labels],
            "label_types": [i.dtype.as_datatype_enum for i in labels],
            "predictions": [t.name for t in predictions] if predictions else [],
            "metric_tensors": [t.name for t in metric_tensors],
            "batch_size_tensor": batch_size_tensor.name,
            "loss_tensor": loss_tensor.name,
            "variables": [v.name for v in trainable_variables],
            "variable_types": [v.dtype.as_datatype_enum for v in trainable_variable_placeholders],
            "variable_assign_placeholders": [v.name for v in trainable_variable_placeholders],
            "assign_variable_op": trainable_assign.name,
            "extra_variables": [v.name for v in extra_variables],
            "extra_variable_types": [v.dtype.as_datatype_enum for v
                                     in extra_variable_assign_placeholders],
            "extra_variable_assign_placeholders": [p.name for p in
                                                   extra_variable_assign_placeholders],
            "assign_extra_variable_op": extra_variable_assign.name,
            "grad_variables": [g.name for g in grads],
            "update_op": update_op.name,
            "restore_op": saver.saver_def.restore_op_name,
            "restore_path_placeholder": saver.saver_def.filename_tensor_name,
            "save_op": _to_operation_name(saver.saver_def.save_tensor_name),
            "save_path_placeholder": saver.saver_def.filename_tensor_name,
            "default_tensor_value": [_to_floats(v) for v in additional_values],
            "init_op": tf.tables_initializer().name
        }
        if train_op is not None:
            meta["train_op"] = train_op.name
        with open(os.path.join(folder, "training_meta.json"), "w") as f:
            f.write(json.dumps(meta))
        with gfile.GFile(os.path.join(folder, "model.meta"), "wb") as f:
            f.write(graph.as_graph_def().SerializeToString())
        return meta, saver
    @staticmethod
    def export(model_dir, loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,
               tensors_with_value, metrics, updates, train_op=None):
        """Prepare metrics/grads/variables and export everything under
        `model_dir`. Returns (meta, saver, val_methods)."""
        import tensorflow as tf
        with graph.as_default():
            # Batch size is derived from the first dimension of inputs[0].
            batch_size_tensor = tf.to_float(tf.shape(inputs[0])[0])
        inputs, additional_inputs, additional_values = \
            TFModel._expand_inputs(inputs, tensors_with_value, loss_tensor)
        metric_tensors, val_methods = TFModel._process_metrics(graph, metrics, batch_size_tensor)
        grads = TFModel._process_grads(graph, grads)
        trainable_variables, trainable_variable_placeholders, trainable_assign, \
            extra_variables, extra_variable_assign_placeholders, \
            extra_variable_assign, update_op = \
            TFModel._process_variables(graph, variables, updates)
        meta, saver = \
            TFModel._save_to_dir(model_dir, sess, graph,
                                 metric_tensors,
                                 batch_size_tensor,
                                 loss_tensor, inputs, labels, predictions,
                                 trainable_variables,
                                 trainable_variable_placeholders,
                                 trainable_assign,
                                 extra_variables,
                                 extra_variable_assign_placeholders,
                                 extra_variable_assign,
                                 grads, update_op, train_op,
                                 additional_inputs,
                                 additional_values)
        return meta, saver, val_methods
    @staticmethod
    def create(loss_tensor, sess, inputs, labels, predictions, grads, variables, graph,
               tensors_with_value, session_config, metrics, updates,
               model_dir, train_op=None):
        """Export the graph (to a temp dir when `model_dir` is None) and wrap
        it into a TFModel ready for the BigDL Estimator."""
        if model_dir is None:
            model_dir = tempfile.mkdtemp()
        else:
            if not os.path.isdir(model_dir):
                os.makedirs(model_dir)
        meta, saver, val_methods = TFModel.export(model_dir, loss_tensor, sess,
                                                  inputs, labels, predictions, grads, variables,
                                                  graph, tensors_with_value, metrics, updates,
                                                  train_op)
        training_helper_layer = TFTrainingHelper(model_dir,
                                                 session_config, saver, meta, sess)
        criterion = IdentityCriterion()
        return TFModel(training_helper_layer, criterion, val_methods)
class TFOptimizer:
    """Drives distributed training of a TensorFlow graph on Spark/BigDL.

    Construct via one of the factory classmethods (`from_train_op`,
    `from_loss`, `from_keras`) rather than directly.
    """
    def __init__(self, tf_model, optim_method,
                 sess=None, dataset=None,
                 clip_norm=None, clip_value=None,
                 model_dir=None):
        """
        TFOptimizer is used for distributed training of TensorFlow
        on Spark/BigDL.

        :param tf_model: a TFModel wrapping the exported training graph
        :param optim_method: the optimization method to be used, such as
        bigdl.optim.optimizer.Adam
        :param sess: the current TensorFlow Session, if you want to use a
        pre-trained model, you should use the Session to load the
        pre-trained variables and pass it to TFOptimizer.
        :param dataset: the TFDataset providing training (and optionally
        validation) data; must use batch_size (not batch_per_thread).
        :param clip_norm: optional L2-norm gradient clipping threshold.
        :param clip_value: optional (min_value, max_value) tuple for constant
        gradient clipping.
        :param model_dir: optional directory passed to the BigDL Estimator
        for checkpoints/summaries.
        """
        self.optim_method = optim_method
        self.sess = sess
        self.dataset = dataset
        self.clip_norm = clip_norm
        # At this level clip_value must already be a (min, max) tuple; the
        # scalar convenience form is only accepted by from_loss.
        if clip_value is not None and not isinstance(clip_value, tuple):
            raise ValueError("The clip_value argument should be a tuple (min_value, max_value)")
        self.clip_constant = clip_value
        if self.dataset.batch_size <= 0:
            raise ValueError("You should set batch_size instead of batch_per_thread for training")
        self.model_dir = model_dir
        self.tf_model = tf_model
        batch_size = self.dataset.batch_size
        self.train_data = self.dataset.get_training_data()
        self.val_data = self.dataset.get_validation_data()
        self.batch_size = batch_size
        self.estimator = Estimator(self.tf_model.training_helper_layer,
                                   self.optim_method,
                                   self.model_dir)
        if self.clip_norm:
            self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
        if self.clip_constant:
            min_value, max_value = self.clip_constant
            self.estimator.set_constant_gradient_clipping(min_value, max_value)
    def load_checkpoint(self, path, version):
        """Load model and optim-method checkpoints of the given `version`
        from `path`, then rebuild the Estimator with the restored state."""
        # todo make version optional
        model_path = os.path.join(path, "model.{}".format(version))
        optim_method_path = os.path.join(path, "optimMethod-TFParkTraining.{}".format(version))
        self.tf_model.training_helper_layer.load_checkpoint(model_path)
        self.optim_method = OptimMethod.load(optim_method_path)
        self.estimator = Estimator(self.tf_model.training_helper_layer,
                                   self.optim_method,
                                   self.model_dir)
        # Re-apply clipping settings on the freshly created Estimator.
        if self.clip_norm:
            self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
        if self.clip_constant:
            min_value, max_value = self.clip_constant
            self.estimator.set_constant_gradient_clipping(min_value, max_value)
    @staticmethod
    def _get_or_create_session(session):
        """Return `session`, or create a new Session with all global
        variables initialized when None."""
        import tensorflow as tf
        if session is None:
            sess = tf.Session()
            sess.run(tf.global_variables_initializer())
        else:
            sess = session
        return sess
    @staticmethod
    def _get_dataset_from_loss(loss):
        """Recover the TFDataset registered (by TFDataset itself) in the
        graph collection keyed by the loss's first placeholder."""
        import tensorflow as tf
        all_required_inputs = find_placeholders([loss])
        dataset = tf.get_collection(all_required_inputs[0].name)[0]
        return dataset
    @staticmethod
    def _get_vars_grads(loss):
        """Compute (grads, variables) for `loss`, sorted by variable name,
        dropping variables whose gradient is None."""
        import tensorflow as tf
        grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)
        grads_vars.sort(key=lambda grad_var: grad_var[1].name)
        variables = []
        grads = []
        for (grad, var) in grads_vars:
            if grad is not None:
                variables.append(var)
                grads.append(grad)
        return grads, variables
    @staticmethod
    def _get_vars_grads_from_train_op(train_op):
        """Extract (grads, variables) from a train_op whose gradients were
        tagged with zoo_identity_op_for_grad by zoo_optimizer."""
        def predicate(t):
            return t.name.split("/")[-1].startswith("zoo_identity_op_for_grad")
        grads = find_tensors([train_op], predicate)
        grad_ops = [grad.op for grad in grads]
        variables = []
        for grad in grad_ops:
            var = list(grad.control_inputs)[0]
            # NOTE(review): this compares a node *name* against the op type
            # string "VarHandleOp" — presumably intended to detect resource
            # variables; confirm whether var.op.type was meant instead.
            if var.name == "VarHandleOp":
                variables.append(var)
            else:
                variables.append(list(var.outputs)[0])
        # variables = [grad.op.control_inputs[0].outputs[0] for grad in grads]
        return grads, variables
    @classmethod
    def from_train_op(cls, train_op, loss, *, inputs=None, labels=None, metrics=None, updates=None,
                      sess=None, dataset=None, tensor_with_value=None, session_config=None,
                      model_dir=None):
        """Create a TFOptimizer from an existing train_op + loss pair,
        inferring the dataset and input/label tensors when not given."""
        sess = TFOptimizer._get_or_create_session(sess)
        grads, variables = TFOptimizer._get_vars_grads_from_train_op(train_op)
        if dataset is None:
            dataset = TFOptimizer._get_dataset_from_loss(loss)
            _ = dataset.tensors  # trigger create tensors if not available
        dataset_inputs = dataset._original_tensors
        if isinstance(dataset_inputs, tuple) and len(dataset_inputs) == 2:
            if inputs is None:
                inputs = dataset_inputs[0]
            if labels is None:
                labels = dataset_inputs[1]
        else:
            if inputs is None:
                inputs = dataset_inputs
            if labels is None:
                labels = []
        inputs = nest.flatten(inputs)
        labels = nest.flatten(labels)
        # The real update is performed by train_op; BigDL still needs an
        # OptimMethod object, so a no-op one is supplied.
        from zoo.tfpark.zoo_optimizer import FakeOptimMethod
        return TFOptimizer._from_grads(loss=loss, sess=sess, inputs=inputs, labels=labels,
                                       grads=grads,
                                       variables=variables, dataset=dataset, metrics=metrics,
                                       tensor_with_value=tensor_with_value,
                                       optim_method=FakeOptimMethod(),
                                       session_config=session_config, updates=updates,
                                       model_dir=model_dir, train_op=train_op)
    @classmethod
    def _from_grads(cls, loss, sess, inputs, labels, grads, variables, dataset, optim_method=None,
                    clip_norm=None, clip_value=None,
                    metrics=None, tensor_with_value=None, session_config=None,
                    model_dir=None, updates=None, train_op=None):
        """Common factory tail: export the graph into a TFModel and wrap it.
        NOTE(review): the export always goes to a temp dir (model_dir=None
        below); `model_dir` is only forwarded to the Estimator — confirm
        this is intentional."""
        graph = loss.graph
        if metrics is None:
            metrics = {}
        tf_model = TFModel.create(loss, sess, inputs, labels, [], grads, variables, graph,
                                  tensor_with_value, session_config, metrics,
                                  updates, model_dir=None, train_op=train_op)
        return cls(tf_model, optim_method, sess=sess, dataset=dataset,
                   clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)
    @classmethod
    def from_loss(cls, loss, optim_method, session=None, inputs=None, dataset=None,
                  val_outputs=None, val_labels=None, val_method=None,
                  clip_norm=None, clip_value=None, metrics=None,
                  tensor_with_value=None, session_config=None, model_dir=None, updates=None):
        """
        Create a TFOptimizer from a TensorFlow loss tensor.
        The loss tensor must come from a TensorFlow graph that only takes TFDataset.tensors and
        the tensors in `tensor_with_value` as inputs.
        :param loss: The loss tensor of the TensorFlow model, should be a scalar
        :param optim_method: the optimization method to be used, such as bigdl.optim.optimizer.Adam
        :param session: the current TensorFlow Session, if you want to used a pre-trained model,
        you should use the Session to load the pre-trained variables and pass it to TFOptimizer.
        :param val_outputs: the validation output TensorFlow tensor to be used by val_methods
        :param val_labels: the validation label TensorFlow tensor to be used by val_methods
        :param val_method: the BigDL val_method(s) to be used.
        :param clip_norm: float >= 0. Gradients will be clipped when their L2 norm exceeds
        this value.
        :param clip_value: float >= 0. Gradients will be clipped when their absolute value
        exceeds this value.
        :param metrics: a dictionary. The key should be a string representing the metric's name
        and the value should be the corresponding TensorFlow tensor, which should be a scalar.
        :param tensor_with_value: a dictionary. The key is TensorFlow tensor, usually a
        placeholder, the value of the dictionary is a tuple of two elements. The first one of
        the tuple is the value to feed to the tensor in training phase and the second one
        is the value to feed to the tensor in validation phase.
        :return: a TFOptimizer
        """
        sess = TFOptimizer._get_or_create_session(session)
        grads, variables = TFOptimizer._get_vars_grads(loss)
        if dataset is None and inputs is None:
            dataset = TFOptimizer._get_dataset_from_loss(loss)
            inputs = dataset._original_tensors
        else:
            if inputs is None:
                raise ValueError("please specify inputs")
            _ = dataset.tensors  # trigger creating placeholders
        if isinstance(inputs, tuple) and len(inputs) == 2:
            inputs, labels = inputs
        else:
            labels = []
        inputs = nest.flatten(inputs)
        labels = nest.flatten(labels)
        if clip_value is not None:
            if isinstance(clip_value, float) or isinstance(clip_value, int):
                if clip_value <= 0:
                    # BUGFIX: the exception was previously created but never
                    # raised, silently accepting non-positive clip values.
                    raise ValueError("The clip_value argument should be positive number")
                clip_value = (-float(clip_value), float(clip_value))
            if not isinstance(clip_value, tuple):
                raise ValueError("The clip_value argument should be" +
                                 " a positive float/int which clips to" +
                                 " (-clip_value, clip_value); " +
                                 "or a tuple which clips to (min_value, max_value)")
        if val_method is not None:
            val_methods = to_list(val_method)
            if metrics is None:
                metrics = {}
            for i, method in enumerate(val_methods):
                metrics['bigdl_metric_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)
        return TFOptimizer._from_grads(loss, sess, inputs, labels, grads, variables, dataset,
                                       optim_method, clip_norm, clip_value,
                                       metrics, tensor_with_value, session_config,
                                       model_dir, updates)
    @staticmethod
    def export_training_model(export_dir, loss, sess, inputs, labels=None, predictions=None,
                              metrics=None, tensor_with_value=None, updates=None):
        """Export the training graph and its metadata to `export_dir`
        without creating a TFOptimizer."""
        grads, variables = TFOptimizer._get_vars_grads(loss)
        TFModel.export(export_dir, loss, sess, inputs, labels, predictions, grads, variables,
                       loss.graph, tensor_with_value, metrics, updates)
        logging.info("Exported TensorFlow model in {} for training".format(export_dir))
    @staticmethod
    def _shape_match(model_shape, dataset_shape):
        """Return True if the model shape is compatible with the dataset
        shape: an unknown (None) dataset dim requires an unknown model dim;
        a known dataset dim requires an equal or unknown model dim.
        BUGFIX: the previous implementation returned inside the loop on the
        first iteration, so only dimension 0 was ever checked (and a scalar
        shape fell through returning None, i.e. a false mismatch)."""
        for i in range(len(dataset_shape)):
            if dataset_shape[i].value is None:
                if model_shape[i].value is not None:
                    return False
            elif (model_shape[i].value is not None
                  and dataset_shape[i].value != model_shape[i].value):
                return False
        return True
    @classmethod
    def from_keras(cls, keras_model, dataset,
                   session_config=None, model_dir=None, metrics=None, optimizer=None):
        """
        Create a TFOptimizer from a tensorflow.keras model. The model must be compiled.
        :param keras_model: the tensorflow.keras model, which must be compiled.
        :param dataset: a TFDataset
        :param session_config: an optional tf.ConfigProto for the session.
        :param model_dir: optional directory for Estimator checkpoints.
        :param metrics: optional extra metrics dict, merged with the ones
        derived from the keras model.
        :param optimizer: optional BigDL optim method; when given, it
        replaces the keras optimizer's train_op entirely.
        :return: a TFOptimizer
        """
        import tensorflow.keras.backend as K
        model_inputs = keras_model.inputs
        if hasattr(keras_model, "targets"):
            model_targets = keras_model.targets
        else:
            model_targets = keras_model._targets
        # target can be None if loss is None
        model_targets = list(filter(lambda x: x is not None, model_targets))
        check_data_compatible(dataset, keras_model, mode="train")
        # standarize feature, labels to support keras model
        if isinstance(dataset, TFNdarrayDataset):
            dataset = _standarize_feature_label_dataset(dataset, keras_model)
        flatten_inputs = nest.flatten(dataset.feature_tensors)
        assert len(model_inputs) == len(flatten_inputs), \
            ("the keras model and TFDataset should have the same number of tensors" +
             " keras model has {} inputs " +
             "while TFDataset has {} inputs").format(len(model_inputs),
                                                     len(flatten_inputs))
        for i in range(len(flatten_inputs)):
            if not TFOptimizer._shape_match(model_inputs[i].shape, flatten_inputs[i].shape):
                raise ValueError(("The {}th input in keras model {}"
                                  " does not match the TFDataset"
                                  "input {}").format(i,
                                                     model_inputs[i],
                                                     flatten_inputs[i]))
        flatten_targets = nest.flatten(dataset.label_tensors)
        assert len(model_targets) == len(flatten_targets), \
            ("the keras model and TFDataset should have the same number of tensors" +
             " keras model has {} targets " +
             "while TFDataset has {} labels").format(len(model_targets),
                                                     len(flatten_inputs))
        # todo check targets shape, currently checking target shape will
        # cause too much false alarm.
        loss = keras_model.total_loss
        # NOTE(review): sorts keras_model._collected_trainable_weights in
        # place; assumed harmless, but confirm no caller relies on order.
        variables = keras_model._collected_trainable_weights
        variables.sort(key=lambda variable: variable.name)
        keras_optimizer = keras_model.optimizer
        from zoo.tfpark.zoo_optimizer import get_gradients_for_keras
        grads = get_gradients_for_keras(keras_optimizer, loss, variables)
        grads_and_vars = list(zip(grads, variables))
        import tensorflow.python.keras.optimizers as koptimizers
        if isinstance(keras_optimizer, koptimizers.TFOptimizer):
            # work around keras TFOptimzier bug
            train_op = keras_optimizer.optimizer.apply_gradients(grads_and_vars)
        else:
            train_op = keras_optimizer.apply_gradients(grads_and_vars)
        sess = K.get_session()
        if keras_model.metrics and (dataset.get_validation_data() is not None):
            if isinstance(keras_model.metrics, dict):
                raise ValueError(
                    "different metrics for different outputs are not supported right now")
            if len(keras_model.outputs) > 1:
                if not all([name.endswith("loss") for name in keras_model.metrics_names]):
                    raise ValueError("metrics (except loss) for multi-head model is not supported")
                else:
                    bigdl_val_methods = [Loss()]
                    val_outputs = keras_model.outputs
                    val_labels = model_targets
            else:
                bigdl_val_methods = \
                    [to_bigdl_metric(m, keras_model.loss) for m in keras_model.metrics_names]
                val_outputs = keras_model.outputs
                val_labels = model_targets
        else:
            val_outputs = None
            val_labels = None
            bigdl_val_methods = None
        # Feed keras's learning-phase flag: True during training, False
        # during validation.
        tensor_with_value = {
            K.learning_phase(): [True, False]
        }
        updates = []
        updates += keras_model.get_updates_for(None)
        # Conditional updates relevant to this model
        updates += keras_model.get_updates_for(keras_model.inputs)
        if bigdl_val_methods is not None:
            val_methods = to_list(bigdl_val_methods)
            bigdl_metrics = {}
            for i, method in enumerate(val_methods):
                bigdl_metrics['bigdl_metric_' + str(i)] = BigDLMetric(method,
                                                                      val_outputs,
                                                                      val_labels)
            if metrics is None:
                metrics = bigdl_metrics
            else:
                metrics.update(bigdl_metrics)
        if optimizer is not None:
            # Explicit BigDL optim method: ignore the keras train_op and let
            # BigDL apply the gradients, honoring the keras clipping config.
            clip_norm = None
            clip_value = None
            if hasattr(keras_optimizer, 'clipnorm'):
                clip_norm = keras_optimizer.clipnorm
            if hasattr(keras_optimizer, 'clipvalue'):
                clip_value = (-keras_optimizer.clipvalue, keras_optimizer.clipvalue)
            tf_model = TFModel.create(loss, sess, model_inputs, model_targets, keras_model.outputs,
                                      grads, variables, loss.graph,
                                      tensor_with_value, session_config, metrics,
                                      updates, model_dir=None)
            return cls(tf_model, optimizer, sess=sess, dataset=dataset,
                       clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)
        return cls.from_train_op(train_op, loss, inputs=model_inputs, labels=model_targets,
                                 metrics=metrics, updates=updates, sess=sess, dataset=dataset,
                                 tensor_with_value=tensor_with_value, session_config=session_config,
                                 model_dir=model_dir)
    def set_constant_gradient_clipping(self, min_value, max_value):
        """
        Configure constant clipping settings.
        :param min_value: the minimum value to clip by
        :param max_value: the maximum value to clip by
        """
        self.estimator.set_constant_gradient_clipping(min_value, max_value)
    def set_gradient_clipping_by_l2_norm(self, clip_norm):
        """
        Configure L2 norm clipping settings.
        :param clip_norm: gradient L2-Norm threshold
        """
        self.estimator.set_l2_norm_gradient_clipping(clip_norm)
    def optimize(self, end_trigger=None, checkpoint_trigger=None):
        """
        Run the training loop of this optimizer.
        :param end_trigger: BigDL's Trigger to indicate when to stop the training.
        :param checkpoint_trigger: When to save a checkpoint and evaluate model.
        """
        if end_trigger is None:
            end_trigger = MaxEpoch(1)
        if checkpoint_trigger is None:
            checkpoint_trigger = EveryEpoch()
        # Multi-slice FeatureSets require Zoo triggers instead of BigDL ones.
        if isinstance(self.train_data, FeatureSet):
            if self.train_data.value.getNumOfSlice() != 1:
                if isinstance(checkpoint_trigger, EveryEpoch):
                    checkpoint_trigger = ZEveryEpoch()
                elif not isinstance(checkpoint_trigger, ZooTrigger):
                    raise Exception("Please use a trigger defined in zoo.util.triggers")
        if self.tf_model.val_methods and self.val_data is not None:
            self.estimator.train_minibatch(train_set=self.train_data,
                                           criterion=self.tf_model.criterion,
                                           end_trigger=end_trigger,
                                           checkpoint_trigger=checkpoint_trigger,
                                           validation_set=self.val_data,
                                           validation_method=self.tf_model.val_methods)
        else:
            self.estimator.train_minibatch(train_set=self.train_data,
                                           criterion=self.tf_model.criterion,
                                           end_trigger=end_trigger,
                                           checkpoint_trigger=checkpoint_trigger)
        # Copy trained weights from the JVM back into the local TF session.
        self.tf_model.training_helper_layer.get_weights_to_python()
| |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class PictureOffsetsTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'stored_ftwo': 'int',
'display_ftwo': 'int',
'sampled_x': 'int',
'sampled_y': 'int',
'display_x': 'int',
'display_y': 'int',
'image_start': 'int',
'image_end': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'stored_ftwo': 'stored_ftwo',
'display_ftwo': 'display_ftwo',
'sampled_x': 'sampled_x',
'sampled_y': 'sampled_y',
'display_x': 'display_x',
'display_y': 'display_y',
'image_start': 'image_start',
'image_end': 'image_end',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, stored_ftwo=None, display_ftwo=None, sampled_x=None, sampled_y=None, display_x=None, display_y=None, image_start=None, image_end=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""PictureOffsetsTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._stored_ftwo = None
self._display_ftwo = None
self._sampled_x = None
self._sampled_y = None
self._display_x = None
self._display_y = None
self._image_start = None
self._image_end = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if stored_ftwo is not None:
self.stored_ftwo = stored_ftwo
if display_ftwo is not None:
self.display_ftwo = display_ftwo
if sampled_x is not None:
self.sampled_x = sampled_x
if sampled_y is not None:
self.sampled_y = sampled_y
if display_x is not None:
self.display_x = display_x
if display_y is not None:
self.display_y = display_y
if image_start is not None:
self.image_start = image_start
if image_end is not None:
self.image_end = image_end
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def stored_ftwo(self):
"""Gets the stored_ftwo of this PictureOffsetsTest. # noqa: E501
:return: The stored_ftwo of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._stored_ftwo
@stored_ftwo.setter
def stored_ftwo(self, stored_ftwo):
"""Sets the stored_ftwo of this PictureOffsetsTest.
:param stored_ftwo: The stored_ftwo of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._stored_ftwo = stored_ftwo
@property
def display_ftwo(self):
"""Gets the display_ftwo of this PictureOffsetsTest. # noqa: E501
:return: The display_ftwo of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._display_ftwo
@display_ftwo.setter
def display_ftwo(self, display_ftwo):
"""Sets the display_ftwo of this PictureOffsetsTest.
:param display_ftwo: The display_ftwo of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._display_ftwo = display_ftwo
@property
def sampled_x(self):
"""Gets the sampled_x of this PictureOffsetsTest. # noqa: E501
:return: The sampled_x of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._sampled_x
@sampled_x.setter
def sampled_x(self, sampled_x):
"""Sets the sampled_x of this PictureOffsetsTest.
:param sampled_x: The sampled_x of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._sampled_x = sampled_x
@property
def sampled_y(self):
"""Gets the sampled_y of this PictureOffsetsTest. # noqa: E501
:return: The sampled_y of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._sampled_y
@sampled_y.setter
def sampled_y(self, sampled_y):
"""Sets the sampled_y of this PictureOffsetsTest.
:param sampled_y: The sampled_y of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._sampled_y = sampled_y
@property
def display_x(self):
"""Gets the display_x of this PictureOffsetsTest. # noqa: E501
:return: The display_x of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._display_x
@display_x.setter
def display_x(self, display_x):
"""Sets the display_x of this PictureOffsetsTest.
:param display_x: The display_x of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._display_x = display_x
@property
def display_y(self):
"""Gets the display_y of this PictureOffsetsTest. # noqa: E501
:return: The display_y of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._display_y
@display_y.setter
def display_y(self, display_y):
"""Sets the display_y of this PictureOffsetsTest.
:param display_y: The display_y of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._display_y = display_y
@property
def image_start(self):
"""Gets the image_start of this PictureOffsetsTest. # noqa: E501
:return: The image_start of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._image_start
@image_start.setter
def image_start(self, image_start):
"""Sets the image_start of this PictureOffsetsTest.
:param image_start: The image_start of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._image_start = image_start
@property
def image_end(self):
"""Gets the image_end of this PictureOffsetsTest. # noqa: E501
:return: The image_end of this PictureOffsetsTest. # noqa: E501
:rtype: int
"""
return self._image_end
@image_end.setter
def image_end(self, image_end):
"""Sets the image_end of this PictureOffsetsTest.
:param image_end: The image_end of this PictureOffsetsTest. # noqa: E501
:type: int
"""
self._image_end = image_end
@property
def reject_on_error(self):
"""Gets the reject_on_error of this PictureOffsetsTest. # noqa: E501
:return: The reject_on_error of this PictureOffsetsTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this PictureOffsetsTest.
:param reject_on_error: The reject_on_error of this PictureOffsetsTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this PictureOffsetsTest. # noqa: E501
:return: The checked of this PictureOffsetsTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this PictureOffsetsTest.
:param checked: The checked of this PictureOffsetsTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PictureOffsetsTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PictureOffsetsTest):
return True
return self.to_dict() != other.to_dict()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for PowerVMDriver.
"""
import contextlib
from nova import context
from nova import db
from nova import test
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.tests import fake_network_cache_model
from nova.tests.image import fake
from nova.virt import images
from nova.virt.powervm import blockdev as powervm_blockdev
from nova.virt.powervm import common
from nova.virt.powervm import driver as powervm_driver
from nova.virt.powervm import exception
from nova.virt.powervm import lpar
from nova.virt.powervm import operator
LOG = logging.getLogger(__name__)
def fake_lpar(instance_name):
    """Return a canned, running LPAR object for the given instance name."""
    attributes = dict(name=instance_name,
                      lpar_id=1,
                      desired_mem=1024,
                      max_mem=2048,
                      max_procs=2,
                      uptime=939395,
                      state='Running')
    return lpar.LPAR(**attributes)
class FakeIVMOperator(object):
    """In-memory stand-in for the IVM operator.

    Mutating calls are no-ops; query calls return fixed, canned values so
    driver tests never talk to a real managed system.
    """

    def get_lpar(self, instance_name, resource_type='lpar'):
        """Return a canned LPAR regardless of the requested name."""
        return fake_lpar(instance_name)

    def list_lpar_instances(self):
        """Fixed pair of instance names used throughout the tests."""
        return ['instance-00000001', 'instance-00000002']

    def create_lpar(self, lpar):
        pass

    def start_lpar(self, instance_name):
        pass

    def stop_lpar(self, instance_name, time_out=30):
        pass

    def remove_lpar(self, instance_name):
        pass

    def get_vhost_by_instance_id(self, instance_id):
        return 'vhostfake'

    def get_virtual_eth_adapter_id(self):
        return 1

    def get_disk_name_by_vhost(self, vhost):
        return 'lvfake01'

    def remove_disk(self, disk_name):
        pass

    def run_cfg_dev(self, device_name):
        pass

    def attach_disk_to_vhost(self, disk, vhost):
        pass

    def get_memory_info(self):
        return dict(total_mem=65536, avail_mem=46336)

    def get_cpu_info(self):
        return dict(total_procs=8.0, avail_procs=6.3)

    def get_disk_info(self):
        return dict(disk_total=10168, disk_used=0, disk_avail=10168)

    def get_hostname(self):
        return 'fake-powervm'

    def rename_lpar(self, old, new):
        pass
class FakeBlockAdapter(powervm_blockdev.PowerVMLocalVolumeAdapter):
    """Disk adapter double: records connection metadata and returns canned
    values; no SSH/VIOS communication ever happens."""

    def __init__(self):
        # Intentionally does NOT call the parent __init__, which would set up
        # a real connection; only the connection metadata is recorded.
        self.connection_data = common.Connection(host='fake_compute_1',
                                                 username='fake_user',
                                                 password='fake_pass')
        pass

    def _create_logical_volume(self, size):
        # Pretend a logical volume of any size was created successfully.
        return 'lvfake01'

    def _remove_logical_volume(self, lv_name):
        pass

    # NOTE(review): 'decrompress' looks like a typo for 'decompress'; it is
    # preserved because callers may pass it by keyword — confirm the real
    # adapter's signature before renaming.
    def _copy_file_to_device(self, sourcePath, device, decrompress=True):
        pass

    def _copy_image_file(self, sourcePath, remotePath, decompress=False):
        # Canned (path, size) pair mimicking a successful image copy.
        finalPath = '/tmp/rhel62.raw.7e358754160433febd6f3318b7c9e335'
        size = 4294967296
        return finalPath, size

    def _copy_device_to_file(self, device_name, file_path):
        pass

    def _copy_image_file_from_host(self, remote_source_path, local_dest_dir,
                                   compress=False):
        # Create an empty file so callers see an existing snapshot path.
        snapshot_file = '/tmp/rhel62.raw.7e358754160433febd6f3318b7c9e335'
        snap_ref = open(snapshot_file, 'w+')
        snap_ref.close()
        return snapshot_file
def fake_get_powervm_operator():
    # Factory substituted for operator.get_powervm_operator in setUp so the
    # driver under test talks to the in-memory FakeIVMOperator.
    return FakeIVMOperator()
class PowerVMDriverTestCase(test.TestCase):
    """Unit tests for PowerVM connection calls."""

    def setUp(self):
        # Swap the real IVM operator and disk adapter for in-memory fakes so
        # no SSH/VIOS communication happens, then build one test instance.
        super(PowerVMDriverTestCase, self).setUp()
        self.stubs.Set(operator, 'get_powervm_operator',
                       fake_get_powervm_operator)
        self.stubs.Set(operator, 'get_powervm_disk_adapter',
                       lambda: FakeBlockAdapter())
        self.powervm_connection = powervm_driver.PowerVMDriver(None)
        self.instance = self._create_instance()

    def _create_instance(self):
        # Create a minimal DB-backed instance using the fake image service.
        fake.stub_out_image_service(self.stubs)
        ctxt = context.get_admin_context()
        instance_type = db.instance_type_get(ctxt, 1)
        sys_meta = instance_types.save_instance_type_info({}, instance_type)
        return db.instance_create(ctxt,
                                  {'user_id': 'fake',
                                   'project_id': 'fake',
                                   'instance_type_id': 1,
                                   'memory_mb': 1024,
                                   'vcpus': 2,
                                   'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
                                   'system_metadata': sys_meta})

    def test_list_instances(self):
        # Names come straight from FakeIVMOperator.list_lpar_instances.
        instances = self.powervm_connection.list_instances()
        self.assertTrue('instance-00000001' in instances)
        self.assertTrue('instance-00000002' in instances)

    def test_instance_exists(self):
        name = self.instance['name']
        self.assertTrue(self.powervm_connection.instance_exists(name))

    def test_spawn(self):
        # Spawn with a stubbed image fetch; the fake operator always reports
        # the LPAR as 'Running', so the driver should see RUNNING afterwards.
        def fake_image_fetch(context, image_id, file_path,
                             user_id, project_id):
            pass
        self.flags(powervm_img_local_path='/images/')
        self.stubs.Set(images, 'fetch', fake_image_fetch)
        image_meta = {}
        image_meta['id'] = '666'
        fake_net_info = network_model.NetworkInfo([
            fake_network_cache_model.new_vif()])
        self.powervm_connection.spawn(context.get_admin_context(),
                                      self.instance, image_meta, [], 's3cr3t',
                                      fake_net_info)
        state = self.powervm_connection.get_info(self.instance)['state']
        self.assertEqual(state, power_state.RUNNING)

    def test_spawn_cleanup_on_fail(self):
        # Verify on a failed spawn, we get the original exception raised.
        # Even though _cleanup itself raises, the image-creation failure
        # must be the exception the caller sees.
        # helper function
        def raise_(ex):
            raise ex

        self.flags(powervm_img_local_path='/images/')
        self.stubs.Set(images, 'fetch', lambda *x, **y: None)
        self.stubs.Set(
            self.powervm_connection._powervm._disk_adapter,
            'create_volume_from_image',
            lambda *x, **y: raise_(exception.PowerVMImageCreationFailed()))
        self.stubs.Set(
            self.powervm_connection._powervm, '_cleanup',
            lambda *x, **y: raise_(Exception('This should be logged.')))
        fake_net_info = network_model.NetworkInfo([
            fake_network_cache_model.new_vif()])
        self.assertRaises(exception.PowerVMImageCreationFailed,
                          self.powervm_connection.spawn,
                          context.get_admin_context(),
                          self.instance,
                          {'id': 'ANY_ID'}, [], 's3cr3t', fake_net_info)

    def test_snapshot(self):
        # update_task_state stashes its arguments on self so the final
        # transition (uploading after pending-upload) can be asserted.
        def update_task_state(task_state, expected_state=None):
            self._loc_task_state = task_state
            self._loc_expected_task_state = expected_state

        loc_context = context.get_admin_context()
        properties = {'instance_id': self.instance['id'],
                      'user_id': str(loc_context.user_id)}
        sent_meta = {'name': 'fake_snap', 'is_public': False,
                     'status': 'creating', 'properties': properties}
        image_service = fake.FakeImageService()
        recv_meta = image_service.create(loc_context, sent_meta)
        self.powervm_connection.snapshot(loc_context,
                                         self.instance, recv_meta['id'],
                                         update_task_state)
        self.assertTrue(self._loc_task_state == task_states.IMAGE_UPLOADING and
                        self._loc_expected_task_state ==
                        task_states.IMAGE_PENDING_UPLOAD)

    def test_destroy(self):
        # After destroy, make get_lpar return None so instance_exists is
        # False for the removed instance.
        self.powervm_connection.destroy(self.instance, None)
        self.stubs.Set(FakeIVMOperator, 'get_lpar', lambda x, y: None)
        name = self.instance['name']
        self.assertFalse(self.powervm_connection.instance_exists(name))

    def test_get_info(self):
        # Values mirror the canned LPAR built by fake_lpar().
        info = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info['state'], power_state.RUNNING)
        self.assertEqual(info['max_mem'], 2048)
        self.assertEqual(info['mem'], 1024)
        self.assertEqual(info['num_cpu'], 2)
        self.assertEqual(info['cpu_time'], 939395)

    # The four test_remote_utility_* cases cover every combination of
    # trailing/leading slashes for common.aix_path_join.
    def test_remote_utility_1(self):
        path_one = '/some/file/'
        path_two = '/path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def test_remote_utility_2(self):
        path_one = '/some/file/'
        path_two = 'path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def test_remote_utility_3(self):
        path_one = '/some/file'
        path_two = '/path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def test_remote_utility_4(self):
        path_one = '/some/file'
        path_two = 'path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def _test_finish_revert_migration_after_crash(self, backup_made, new_made):
        # Mox script: depending on whether the 'rsz_' backup and/or the new
        # LPAR exist, revert must destroy the new one, rename the backup
        # back, and power it on.
        inst = {'name': 'foo'}
        self.mox.StubOutWithMock(self.powervm_connection, 'instance_exists')
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'destroy')
        self.mox.StubOutWithMock(self.powervm_connection._powervm._operator,
                                 'rename_lpar')
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'power_on')
        self.powervm_connection.instance_exists('rsz_foo').AndReturn(
            backup_made)
        if backup_made:
            self.powervm_connection.instance_exists('foo').AndReturn(new_made)
            if new_made:
                self.powervm_connection._powervm.destroy('foo')
            self.powervm_connection._powervm._operator.rename_lpar('rsz_foo',
                                                                   'foo')
        self.powervm_connection._powervm.power_on('foo')
        self.mox.ReplayAll()
        self.powervm_connection.finish_revert_migration(inst, [])

    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(True, True)

    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(True, False)

    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(False, False)

    def test_migrate_volume_use_instance_name(self):
        # When an instance name is supplied, the resize file is named after
        # the instance rather than the logical volume.
        inst_name = 'instance-00000000'
        lv_name = 'logical-vol-name'
        src_host = 'compute_host_1'
        dest = 'compute_host_1'
        image_path = 'some/image/path'
        fake_noop = lambda *args, **kwargs: None
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       '_copy_device_to_file', fake_noop)
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command_as_root', fake_noop)
        blockdev_op = self.powervm_connection._powervm._disk_adapter
        file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
                                               image_path, inst_name)
        expected_path = 'some/image/path/instance-00000000_rsz.gz'
        self.assertEqual(file_path, expected_path)

    def test_migrate_volume_use_lv_name(self):
        # Without an instance name, the logical volume name is used instead.
        lv_name = 'logical-vol-name'
        src_host = 'compute_host_1'
        dest = 'compute_host_1'
        image_path = 'some/image/path'
        fake_noop = lambda *args, **kwargs: None
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       '_copy_device_to_file', fake_noop)
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command_as_root', fake_noop)
        blockdev_op = self.powervm_connection._powervm._disk_adapter
        file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
                                               image_path)
        expected_path = 'some/image/path/logical-vol-name_rsz.gz'
        self.assertEqual(file_path, expected_path)

    def test_migrate_build_scp_command(self):
        # The assertion lives inside the stubbed run_vios_command; the
        # migrate_volume call below triggers it when src != dest.
        # NOTE(review): 'file_path' is unused here; the call is made only
        # for its side effect on the stub.
        lv_name = 'logical-vol-name'
        src_host = 'compute_host_1'
        dest = 'compute_host_2'
        image_path = 'some/image/path'
        fake_noop = lambda *args, **kwargs: None

        @contextlib.contextmanager
        def fake_vios_to_vios_auth(*args, **kwargs):
            key_name = 'some_key'
            yield key_name
        self.stubs.Set(common, 'vios_to_vios_auth',
                       fake_vios_to_vios_auth)
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command_as_root', fake_noop)

        def fake_run_vios_command(*args, **kwargs):
            cmd = args[0]
            exp_cmd = ' '.join(['scp -o "StrictHostKeyChecking no" -i',
                                'some_key',
                                'some/image/path/logical-vol-name_rsz.gz',
                                'fake_user@compute_host_2:some/image/path'])
            self.assertEqual(exp_cmd, cmd)
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command',
                       fake_run_vios_command)
        blockdev_op = self.powervm_connection._powervm._disk_adapter
        file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
                                               image_path)

    def test_get_resize_name(self):
        inst_name = 'instance-00000001'
        expected_name = 'rsz_instance-00000001'
        result = self.powervm_connection._get_resize_name(inst_name)
        self.assertEqual(expected_name, result)

    def test_get_long_resize_name(self):
        # Long names are truncated from the left to make room for 'rsz_'.
        inst_name = 'some_really_long_instance_name_00000001'
        expected_name = 'rsz__really_long_instance_name_00000001'
        result = self.powervm_connection._get_resize_name(inst_name)
        self.assertEqual(expected_name, result)

    def test_get_host_stats(self):
        # Values mirror FakeIVMOperator's canned memory/CPU/disk info.
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        host_stats = self.powervm_connection.get_host_stats(True)
        self.assertIsNotNone(host_stats)
        self.assertEquals(host_stats['vcpus'], 8.0)
        self.assertEquals(round(host_stats['vcpus_used'], 1), 1.7)
        self.assertEquals(host_stats['host_memory_total'], 65536)
        self.assertEquals(host_stats['host_memory_free'], 46336)
        self.assertEquals(host_stats['disk_total'], 10168)
        self.assertEquals(host_stats['disk_used'], 0)
        self.assertEquals(host_stats['disk_available'], 10168)
        self.assertEquals(host_stats['disk_total'],
                          host_stats['disk_used'] +
                          host_stats['disk_available'])
        self.assertEquals(host_stats['cpu_info'], ('ppc64', 'powervm', '3940'))
        self.assertEquals(host_stats['hypervisor_type'], 'powervm')
        self.assertEquals(host_stats['hypervisor_version'], '7.1')
        self.assertEquals(host_stats['hypervisor_hostname'], "fake-powervm")
        self.assertEquals(host_stats['supported_instances'][0][0], "ppc64")
        self.assertEquals(host_stats['supported_instances'][0][1], "powervm")
        self.assertEquals(host_stats['supported_instances'][0][2], "hvm")
| |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import binlogdata_pb2 as binlogdata__pb2
import query_pb2 as query__pb2
class QueryStub(object):
    """Query defines the tablet query service, implemented by vttablet.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # Data-driven rewrite of the generated stub: every RPC follows the
        # <Name>Request / <Name>Response naming convention in its pb2 module.
        rpcs = [
            # (method name, pb2 module, channel multicallable factory)
            ('Execute', query__pb2, channel.unary_unary),
            ('ExecuteBatch', query__pb2, channel.unary_unary),
            ('StreamExecute', query__pb2, channel.unary_stream),
            ('Begin', query__pb2, channel.unary_unary),
            ('Commit', query__pb2, channel.unary_unary),
            ('Rollback', query__pb2, channel.unary_unary),
            ('Prepare', query__pb2, channel.unary_unary),
            ('CommitPrepared', query__pb2, channel.unary_unary),
            ('RollbackPrepared', query__pb2, channel.unary_unary),
            ('CreateTransaction', query__pb2, channel.unary_unary),
            ('StartCommit', query__pb2, channel.unary_unary),
            ('SetRollback', query__pb2, channel.unary_unary),
            ('ConcludeTransaction', query__pb2, channel.unary_unary),
            ('ReadTransaction', query__pb2, channel.unary_unary),
            ('BeginExecute', query__pb2, channel.unary_unary),
            ('BeginExecuteBatch', query__pb2, channel.unary_unary),
            ('MessageStream', query__pb2, channel.unary_stream),
            ('MessageAck', query__pb2, channel.unary_unary),
            ('SplitQuery', query__pb2, channel.unary_unary),
            ('StreamHealth', query__pb2, channel.unary_stream),
            ('UpdateStream', query__pb2, channel.unary_stream),
            ('VStream', binlogdata__pb2, channel.unary_stream),
            ('VStreamRows', binlogdata__pb2, channel.unary_stream),
        ]
        for name, pb2, factory in rpcs:
            setattr(self, name, factory(
                '/queryservice.Query/' + name,
                request_serializer=getattr(
                    pb2, name + 'Request').SerializeToString,
                response_deserializer=getattr(
                    pb2, name + 'Response').FromString,
            ))
class QueryServicer(object):
    """Query defines the tablet query service, implemented by vttablet.
    """

    def _fail_unimplemented(self, context):
        # Shared stub body: flag the RPC as unimplemented and raise.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Execute(self, request, context):
        """Execute executes the specified SQL query (might be in a
        transaction context, if Query.transaction_id is set).
        """
        self._fail_unimplemented(context)

    def ExecuteBatch(self, request, context):
        """ExecuteBatch executes a list of queries, and returns the result
        for each query.
        """
        self._fail_unimplemented(context)

    def StreamExecute(self, request, context):
        """StreamExecute executes a streaming query. Use this method if the
        query returns a large number of rows. The first QueryResult will
        contain the Fields, subsequent QueryResult messages will contain
        the rows.
        """
        self._fail_unimplemented(context)

    def Begin(self, request, context):
        """Begin a transaction.
        """
        self._fail_unimplemented(context)

    def Commit(self, request, context):
        """Commit a transaction.
        """
        self._fail_unimplemented(context)

    def Rollback(self, request, context):
        """Rollback a transaction.
        """
        self._fail_unimplemented(context)

    def Prepare(self, request, context):
        """Prepare preares a transaction.
        """
        self._fail_unimplemented(context)

    def CommitPrepared(self, request, context):
        """CommitPrepared commits a prepared transaction.
        """
        self._fail_unimplemented(context)

    def RollbackPrepared(self, request, context):
        """RollbackPrepared rolls back a prepared transaction.
        """
        self._fail_unimplemented(context)

    def CreateTransaction(self, request, context):
        """CreateTransaction creates the metadata for a 2pc transaction.
        """
        self._fail_unimplemented(context)

    def StartCommit(self, request, context):
        """StartCommit initiates a commit for a 2pc transaction.
        """
        self._fail_unimplemented(context)

    def SetRollback(self, request, context):
        """SetRollback marks the 2pc transaction for rollback.
        """
        self._fail_unimplemented(context)

    def ConcludeTransaction(self, request, context):
        """ConcludeTransaction marks the 2pc transaction as resolved.
        """
        self._fail_unimplemented(context)

    def ReadTransaction(self, request, context):
        """ReadTransaction returns the 2pc transaction info.
        """
        self._fail_unimplemented(context)

    def BeginExecute(self, request, context):
        """BeginExecute executes a begin and the specified SQL query.
        """
        self._fail_unimplemented(context)

    def BeginExecuteBatch(self, request, context):
        """BeginExecuteBatch executes a begin and a list of queries.
        """
        self._fail_unimplemented(context)

    def MessageStream(self, request, context):
        """MessageStream streams messages from a message table.
        """
        self._fail_unimplemented(context)

    def MessageAck(self, request, context):
        """MessageAck acks messages for a table.
        """
        self._fail_unimplemented(context)

    def SplitQuery(self, request, context):
        """SplitQuery is the API to facilitate MapReduce-type iterations
        over large data sets (like full table dumps).
        """
        self._fail_unimplemented(context)

    def StreamHealth(self, request, context):
        """StreamHealth runs a streaming RPC to the tablet, that returns the
        current health of the tablet on a regular basis.
        """
        self._fail_unimplemented(context)

    def UpdateStream(self, request, context):
        """UpdateStream asks the server to return a stream of the updates that have been applied to its database.
        """
        self._fail_unimplemented(context)

    def VStream(self, request, context):
        """VStream streams vreplication events.
        """
        self._fail_unimplemented(context)

    def VStreamRows(self, request, context):
        """VStreamRows streams rows from the specified starting point.
        """
        self._fail_unimplemented(context)
def add_QueryServicer_to_server(servicer, server):
    """Register every Query RPC handler from `servicer` on `server`."""
    # Data-driven rewrite: request/response classes follow the
    # <Name>Request / <Name>Response convention in their pb2 module, and
    # each entry records whether the RPC is unary-unary or unary-stream.
    unary = grpc.unary_unary_rpc_method_handler
    stream = grpc.unary_stream_rpc_method_handler
    rpcs = [
        ('Execute', query__pb2, unary),
        ('ExecuteBatch', query__pb2, unary),
        ('StreamExecute', query__pb2, stream),
        ('Begin', query__pb2, unary),
        ('Commit', query__pb2, unary),
        ('Rollback', query__pb2, unary),
        ('Prepare', query__pb2, unary),
        ('CommitPrepared', query__pb2, unary),
        ('RollbackPrepared', query__pb2, unary),
        ('CreateTransaction', query__pb2, unary),
        ('StartCommit', query__pb2, unary),
        ('SetRollback', query__pb2, unary),
        ('ConcludeTransaction', query__pb2, unary),
        ('ReadTransaction', query__pb2, unary),
        ('BeginExecute', query__pb2, unary),
        ('BeginExecuteBatch', query__pb2, unary),
        ('MessageStream', query__pb2, stream),
        ('MessageAck', query__pb2, unary),
        ('SplitQuery', query__pb2, unary),
        ('StreamHealth', query__pb2, stream),
        ('UpdateStream', query__pb2, stream),
        ('VStream', binlogdata__pb2, stream),
        ('VStreamRows', binlogdata__pb2, stream),
    ]
    rpc_method_handlers = {}
    for name, pb2, make_handler in rpcs:
        rpc_method_handlers[name] = make_handler(
            getattr(servicer, name),
            request_deserializer=getattr(pb2, name + 'Request').FromString,
            response_serializer=getattr(
                pb2, name + 'Response').SerializeToString,
        )
    generic_handler = grpc.method_handlers_generic_handler(
        'queryservice.Query', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| |
##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, succeed
from twisted.trial import unittest
from twistedcaldav import memcacher
from twistedcaldav.ical import Component
from twistedcaldav.stdconfig import config
from txdav.caldav.datastore.scheduling.processing import ImplicitProcessor
from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
class FakeImplicitProcessor(ImplicitProcessor):
    """
    Test double for ImplicitProcessor that counts requested batch
    refreshes instead of performing them.
    """
    def __init__(self):
        # Number of batch refreshes enqueued so far.
        self.batches = 0
    def _enqueueBatchRefresh(self, exclude_attendees):
        # Record the request; no actual refresh work is performed.
        self.batches = self.batches + 1
    def writeCalendarResource(self, collection, resource, calendar):
        # Pretend the write succeeded and hand back a stub resource.
        return succeed(FakeResource())
class FakePrincipal(object):
    """
    Minimal principal stub exposing a single calendar user address.
    """
    def __init__(self, cuaddr):
        # The lone calendar user address this principal answers with.
        self.cuaddr = cuaddr
    def calendarUserAddresses(self):
        """Return the principal's addresses as a one-element tuple."""
        return tuple([self.cuaddr])
class FakeResource(object):
    """
    Stub calendar resource that acts as its own parent collection and
    owner home, with a fixed id and no UID.
    """
    def parentCollection(self):
        # The stub is self-contained, so it is its own parent.
        return self
    def ownerHome(self):
        # Likewise, the stub stands in for its owner's home.
        return self
    def uid(self):
        # No UID is associated with the stub.
        return None
    def id(self):
        # Fixed resource id for code that only needs *some* id.
        return 1
class BatchRefresh (unittest.TestCase):
    """
    Tests for batched attendee refreshes driven by ImplicitProcessor.
    Each test builds an event, wires up a FakeImplicitProcessor by hand
    and checks how many batch refreshes were enqueued (the fake's
    ``batches`` counter).
    """
    def setUp(self):
        # Disable the real memcached pools and use the in-memory test
        # cache so the tests run without an external memcached service.
        super(BatchRefresh, self).setUp()
        config.Memcached.Pools.Default.ClientEnabled = False
        config.Memcached.Pools.Default.ServerEnabled = False
        memcacher.Memcacher.allowTestCache = True
        memcacher.Memcacher.memoryCacheInstance = None
    @inlineCallbacks
    def test_queueAttendeeUpdate_no_refresh(self):
        # Two-attendee event with AttendeeRefreshBatch=5: no batch
        # refresh should be enqueued.
        self.patch(config.Scheduling.Options, "AttendeeRefreshBatch", 5)
        calendar = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER:urn:uuid:user01
ATTENDEE:urn:uuid:user01
ATTENDEE:urn:uuid:user02
END:VEVENT
END:VCALENDAR
""")
        # Wire up only the attributes queueAttendeeUpdate reads.
        processor = FakeImplicitProcessor()
        processor.txn = ""
        processor.uid = "12345-67890"
        processor.recipient_calendar_resource = FakeResource()
        processor.recipient_calendar = calendar
        yield processor.queueAttendeeUpdate(("urn:uuid:user02", "urn:uuid:user01",))
        self.assertEqual(processor.batches, 0)
    @inlineCallbacks
    def test_queueAttendeeUpdate_with_refresh(self):
        # Same setup but with a third attendee: exactly one batch
        # refresh should be enqueued.
        self.patch(config.Scheduling.Options, "AttendeeRefreshBatch", 5)
        calendar = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER:urn:uuid:user01
ATTENDEE:urn:uuid:user01
ATTENDEE:urn:uuid:user02
ATTENDEE:urn:uuid:user03
END:VEVENT
END:VCALENDAR
""")
        processor = FakeImplicitProcessor()
        processor.txn = ""
        processor.uid = "12345-67890"
        processor.recipient_calendar_resource = FakeResource()
        processor.recipient_calendar = calendar
        yield processor.queueAttendeeUpdate(("urn:uuid:user02", "urn:uuid:user01",))
        self.assertEqual(processor.batches, 1)
    @inlineCallbacks
    def test_queueAttendeeUpdate_count_suppressed(self):
        # AttendeeRefreshCountLimit=5 suppresses the refresh for the
        # large (10-attendee) event but not the small one; a limit of 0
        # disables suppression entirely.
        self.patch(config.Scheduling.Options, "AttendeeRefreshCountLimit", 5)
        self.patch(config.Scheduling.Options, "AttendeeRefreshBatch", 5)
        calendar_small = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER:urn:uuid:user01
ATTENDEE:urn:uuid:user01
ATTENDEE:urn:uuid:user02
ATTENDEE:urn:uuid:user03
ATTENDEE:urn:uuid:user04
END:VEVENT
END:VCALENDAR
""")
        itip_small = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER:urn:uuid:user01
ATTENDEE;PARTSTAT="ACCEPTED":urn:uuid:user02
END:VEVENT
END:VCALENDAR
""")
        calendar_large = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER:urn:uuid:user01
ATTENDEE:urn:uuid:user01
ATTENDEE:urn:uuid:user02
ATTENDEE:urn:uuid:user03
ATTENDEE:urn:uuid:user04
ATTENDEE:urn:uuid:user05
ATTENDEE:urn:uuid:user06
ATTENDEE:urn:uuid:user07
ATTENDEE:urn:uuid:user08
ATTENDEE:urn:uuid:user09
ATTENDEE:urn:uuid:user10
END:VEVENT
END:VCALENDAR
""")
        itip_large = Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER:urn:uuid:user01
ATTENDEE;PARTSTAT="ACCEPTED":urn:uuid:user02
END:VEVENT
END:VCALENDAR
""")
        # (count limit, event, iTIP reply, expected batch count, label)
        for count, calendar, itip, result, msg in (
            (5, calendar_small, itip_small, 1, "Small, count=5"),
            (5, calendar_large, itip_large, 0, "Large, count=5"),
            (0, calendar_small, itip_small, 1, "Small, count=0"),
            (0, calendar_large, itip_large, 1, "Large, count=0"),
        ):
            config.Scheduling.Options.AttendeeRefreshCountLimit = count
            processor = FakeImplicitProcessor()
            processor.txn = ""
            # Fresh copies per iteration with a new UID so runs do not
            # interfere with each other.
            processor.recipient_calendar = calendar.duplicate()
            processor.uid = processor.recipient_calendar.newUID()
            processor.recipient_calendar_resource = FakeResource()
            processor.message = itip.duplicate()
            processor.message.newUID(processor.uid)
            processor.originator = LocalCalendarUser(None, None)
            processor.recipient = LocalCalendarUser(None, None)
            # NOTE(review): uid is reassigned here from the *original*
            # calendar, overwriting the newUID() value above — confirm
            # this is intentional.
            processor.uid = calendar.resourceUID()
            processor.noAttendeeRefresh = False
            processed = yield processor.doImplicitOrganizerUpdate()
            self.assertTrue(processed[3] is not None, msg=msg)
            self.assertEqual(processor.batches, result, msg=msg)
| |
import numpy as np
import matplotlib.pyplot as plt
from codim1.core import *
from codim1.assembly import *
from codim1.fast_lib import *
from codim1.post import *
import codim1.core.tools as tools
import matplotlib as mpl
# Use thicker lines by default in all matplotlib plots below.
mpl.rcParams['lines.linewidth'] = 2
def test_fault_surface_intersection():
    """
    Model a buried fault intersecting a free surface with a
    symmetric-Galerkin BEM and compare the computed free-surface
    displacements against the analytic edge-dislocation solution
    (Segall 2010, chapter 3).

    Fixes over the original: ``print i`` (Python-2-only syntax) is now
    ``print(i)``, and surface element counts use floor division so they
    stay integers on Python 3. Behavior on Python 2 is unchanged.
    """
    # Material and discretization parameters.
    shear_modulus = 1.0
    poisson_ratio = 0.25
    n_elements_surface = 100
    # n_elements_surface = 25
    degree = 2
    quad_min = degree + 1
    quad_mult = 3
    quad_max = quad_mult * degree
    quad_logr = quad_mult * degree + (degree % 2)
    quad_oneoverr = quad_mult * degree + (degree % 2)
    interior_quad_pts = 13  # NOTE(review): currently unused below
    # Fault endpoints: depths (di top, df bottom) and x locations.
    di = 0.0
    df = 1.0
    x_di = -0.5
    x_df = 0.5
    # Determine fault parameters
    # fault angle
    left_end = np.array((x_di, -di))
    right_end = np.array((x_df, -df))
    fault_vector = left_end - right_end
    # fault tangent and normal vectors
    fault_tangential = fault_vector / np.linalg.norm(fault_vector)
    fault_normal = np.array((fault_tangential[1], -fault_tangential[0]))
    # Mesh the surface in two segments that meet at the fault trace.
    main_surface_seg1_left = (-10.0, 0.0)
    main_surface_seg1_right = (-0.5, 0.0)
    main_surface_seg2_left = (-0.5, 0.0)
    main_surface_seg2_right = (10.0, 0.0)
    # Floor division keeps the element count an int on Python 3
    # (plain '/' would pass 50.0 here).
    mesh1seg1 = simple_line_mesh(n_elements_surface // 2,
                                 main_surface_seg1_left,
                                 main_surface_seg1_right)
    mesh1seg2 = simple_line_mesh(n_elements_surface // 2,
                                 main_surface_seg2_left,
                                 main_surface_seg2_right)
    mesh1 = combine_meshes(mesh1seg1, mesh1seg2, ensure_continuity = True)
    # Extend the free surface to each side with geometrically growing
    # "ray" elements to approximate a half space.
    per_step = 5
    steps = 10
    ray_lengths = [1.0] * per_step
    for i in range(1, steps):
        ray_lengths.extend([2.0 ** float(i)] * per_step)
    ray_left_dir = (-1.0, 0.0)
    mesh2 = ray_mesh(main_surface_seg1_left, ray_left_dir,
                     ray_lengths, flip = True)
    ray_right_dir = (1.0, 0.0)
    mesh3 = ray_mesh(main_surface_seg2_right, ray_right_dir, ray_lengths)
    part1 = combine_meshes(mesh1, mesh3, ensure_continuity = True)
    surface_mesh = combine_meshes(mesh2, part1, ensure_continuity = True)
    # Traction-free boundary condition on the whole surface.
    apply_to_elements(surface_mesh, "bc",
                      BC("traction", ZeroBasis()), non_gen = True)
    # Mesh the fault; unit slip along the (negative) fault tangent.
    fault_elements = 50
    fault_mesh = simple_line_mesh(fault_elements, left_end, right_end)
    apply_to_elements(fault_mesh, "bc", BC("crack_displacement",
                      ConstantBasis(-fault_tangential)),
                      non_gen = True)
    # Combine the pieces and attach quadrature/basis choices.
    mesh = combine_meshes(surface_mesh, fault_mesh, ensure_continuity = True)
    bf = gll_basis(degree)
    qs = QuadStrategy(mesh, quad_min, quad_max, quad_logr, quad_oneoverr)
    apply_to_elements(mesh, "qs", qs, non_gen = True)
    apply_to_elements(mesh, "basis", bf, non_gen = True)
    sgbem_dofs(mesh)
    ek = ElasticKernelSet(shear_modulus, poisson_ratio)
    matrix, rhs = sgbem_assemble(mesh, ek)
    # Replace two rows with difference constraints tying the DOFs of the
    # two elements meeting at the fault's upper vertex (presumably the
    # imposed slip discontinuity where the fault meets the surface —
    # confirm against codim1 docs).
    lse = fault_mesh.elements[0].vertex1.connected_to[0]
    rse = fault_mesh.elements[0].vertex1.connected_to[1]
    constraint_dofx = lse.dofs[0, -1]
    constraint_dofy = lse.dofs[1, -1]
    other_dofx = rse.dofs[0, 0]
    other_dofy = rse.dofs[1, 0]
    matrix[constraint_dofx, :] = 0
    matrix[constraint_dofy, :] = 0
    rhs[constraint_dofx] = -1 / np.sqrt(2)
    rhs[constraint_dofy] = 1 / np.sqrt(2)
    matrix[constraint_dofx, constraint_dofx] = -1
    matrix[constraint_dofx, other_dofx] = 1
    matrix[constraint_dofy, constraint_dofy] = -1
    matrix[constraint_dofy, other_dofy] = 1
    # apply_average_constraint(matrix, rhs, surface_mesh)
    # The matrix produced by the hypersingular kernel is singular, so I need
    # to provide some further constraint in order to remove rigid body motions.
    # I impose a constraint that forces the average displacement to be zero.
    # apply_average_constraint(matrix, rhs, mesh)
    soln_coeffs = np.linalg.solve(matrix, rhs)
    apply_coeffs(mesh, soln_coeffs, "soln")
    x, u, t = evaluate_boundary_solution(surface_mesh, soln_coeffs, 8)
    def analytical_free_surface(x, x_d, d, delta, s):
        """
        Analytical solution for the surface displacements from an infinite
        buried edge dislocation. Add two of them with opposite slip to
        represent an infinitely long thrust/normal fault.
        Extracted from chapter 3 of Segall 2010.
        """
        if abs(d) <= 1e-5:
            # Depth ~ 0: use the surface-intersection limit instead.
            return analytical_free_surface_intersect(x, x_d, delta, s)
        xsi = (x - x_d) / d
        factor = s / np.pi
        term1 = np.cos(delta) * np.arctan(xsi)
        term2 = (np.sin(delta) - xsi * np.cos(delta)) / (1 + xsi ** 2)
        ux = factor * (term1 + term2)
        term1 = np.sin(delta) * np.arctan(xsi)
        term2 = (np.cos(delta) + xsi * np.sin(delta)) / (1 + xsi ** 2)
        uy = -factor * (term1 + term2)
        return ux, uy
    def analytical_free_surface_intersect(x, x_d, delta, s):
        # Limit of analytical_free_surface as the dislocation depth -> 0.
        factor = s / np.pi
        ux = factor * np.cos(delta) * (np.pi / 2) * np.sign(x - x_d)
        uy = -factor * np.sin(delta) * (np.pi / 2) * np.sign(x - x_d)
        return ux, uy
    # Compute the exact solution: superpose dislocations at both fault ends.
    x_e = x[0, :]
    delta = np.arctan((df - di) / (x_df - x_di))
    ux_exact1, uy_exact1 = analytical_free_surface(x_e, x_di, di, delta, -1.0)
    ux_exact2, uy_exact2 = analytical_free_surface(x_e, x_df, df, delta, 1.0)
    ux_exact = ux_exact1 + ux_exact2
    uy_exact = uy_exact1 + uy_exact2
    # assert(np.sum(np.abs(ux_exact - u[0,:])) < 0.1)
    def comparison_plot():
        # Remove the mean so numeric and analytic curves share a datum.
        u[0, :] -= np.mean(u[0, :])
        u[1, :] -= np.mean(u[1, :])
        plt.plot(x_e, ux_exact, '-o', label = 'Exact X Displacement')
        plt.plot(x_e, uy_exact, '-o', label = 'Exact Y Displacement')
        plt.plot(x_e, u[0, :], '-o',
                 linewidth = 2, label = 'Estimated X displacement')
        plt.plot(x_e, u[1, :], '-o',
                 linewidth = 2, label = 'Estimated Y displacement')
        plt.xlim(-4, 4)
        plt.xlabel(r'$x/d$', fontsize = 18)
        plt.ylabel(r'$u/s$', fontsize = 18)
        plt.legend()
        plt.show()
    def error_plot():
        # Pointwise relative error of both displacement components.
        x_error = np.abs(ux_exact - u[0, :]) / np.abs(ux_exact)
        y_error = np.abs(uy_exact - u[1, :]) / np.abs(uy_exact)
        plt.figure(1)
        plt.xlim(-30, 30)
        plt.ylim(0, 0.0001)
        plt.plot(x_e, x_error, '*', label = '% X displacement Error')
        plt.plot(x_e, y_error, '*', label = '% Y displacement Error')
        plt.xlabel(r'$x/d$', fontsize = 18)
        plt.ylabel(r'$100\left(\frac{|u_{exact} - u_{est}|}{s}\right)$', fontsize = 18)
        plt.legend()
        plt.show()
    def interior_plot():
        # Sample interior displacements on a regular grid and contour them.
        x_pts = 30
        y_pts = 30
        min_x = -3
        max_x = 3
        min_y = -3
        max_y = 0
        x = np.linspace(min_x, max_x, x_pts)
        y = np.linspace(min_y, max_y, y_pts)
        int_ux = np.zeros((y_pts, x_pts))
        int_uy = np.zeros((y_pts, x_pts))
        for i in range(x_pts):
            # Progress indicator (print() works on Python 2 and 3).
            print(i)
            for j in range(y_pts):
                u = sgbem_interior(mesh, (x[i], y[j]), np.zeros(2),
                                   ek, "soln", "displacement")
                int_ux[j, i] = u[0]
                int_uy[j, i] = u[1]
        X, Y = np.meshgrid(x, y)
        def contf_plot(type, data):
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            levels = np.linspace(-0.5, 0.5, 21)
            tools.plot_mesh(fault_mesh, show = False, fig_ax = (fig, ax))
            im = ax.contourf(X, Y, data, levels)
            ax.contour(X, Y, data, levels,
                       colors = ('k',), linestyles=['solid'])
            ax.set_ylabel(r'$x/d$', fontsize = 18)
            ax.set_xlabel(r'$y/d$', fontsize = 18)
            ax.set_title(type + ' displacement contours.')
            ax.set_xlim(min_x, max_x)
            ax.set_ylim(min_y, max_y)
            fig.colorbar(im)
        contf_plot('Vertical', int_uy)
        contf_plot('Horizontal', int_ux)
        plt.show()
    comparison_plot()
    # error_plot()
    # Forming interior plot
    # interior_plot()
# Run the demonstration/test directly when executed as a script.
if __name__ == "__main__":
    test_fault_surface_intersection()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jsonmodels import errors
from jsonmodels import fields
import netaddr
import six
from dragonflow._i18n import _
from dragonflow.common import dhcp
from dragonflow.db import model_framework
from dragonflow.db import model_proxy
def _create_ref(proxy_type, value, lazy):
    """Build a model proxy from an ID string, another proxy, or a model.

    When a proxy or a concrete model instance is supplied instead of a
    plain ID, the target ID is read from the object's ``id`` field.
    """
    if isinstance(value, six.string_types):
        # A plain ID string is used directly.
        return proxy_type(id=value, lazy=lazy)
    if isinstance(value, (proxy_type, proxy_type.get_proxied_model())):
        # Proxy or concrete model: reference it by its own ID.
        return proxy_type(id=value.id, lazy=lazy)
    raise ValueError(
        _('Reference field should only be initialized by ID or '
          'model instance/reference'))
class ReferenceField(fields.BaseField):
    '''A "foreign-key" field referencing another model by its ID.

    The serialized form stores only the referenced object's ID:

        "lswitch": "uuid-of-some-lswitch",

    while the parsed form holds a proxy (built via model_proxy) that
    behaves like the referenced object itself:

    >>> obj.lswitch.name
    'some-lswitch'
    '''
    def __init__(self, model, lazy=True, *args, **kwargs):
        super(ReferenceField, self).__init__(*args, **kwargs)
        self._model = model
        self._lazy = lazy
        # The proxy type is created lazily on first access: the target
        # model may only be known by name (a string) at this point.
        self._types = None
    def validate(self, value):
        # References are not validated here.
        pass
    @property
    def types(self):
        if self._types is None:
            target = model_framework.get_model(self._model)
            self._types = (model_proxy.create_model_proxy(target),)
        return self._types
    def parse_value(self, value):
        if value is None:
            return None
        return _create_ref(self.types[0], value, self._lazy)
    def to_struct(self, obj):
        # Serialize to just the referenced ID (None stays None).
        return obj.id if obj is not None else None
class ListOfField(fields.ListField):
    """A list field that delegates element (de)serialization to *field*.

    Wraps another BaseField instance and applies its parse_value /
    to_struct to every element of the list.
    """
    def __init__(self, field, *args, **kwargs):
        # Validate *before* touching field.types, so a wrong argument
        # raises the intended TypeError rather than an AttributeError
        # from the super().__init__ call.
        if not isinstance(field, fields.BaseField):
            raise TypeError(
                _('field must be an instance of BaseField. Got: %s') %
                (type(field)))
        super(ListOfField, self).__init__(
            items_types=field.types, *args, **kwargs)
        self.field = field
    def parse_value(self, values):
        """Parse each serialized element; empty/None becomes []."""
        if not values:
            return []
        return [self.field.parse_value(value) for value in values]
    def to_struct(self, objs):
        """Serialize each element; empty/None becomes []."""
        if not objs:
            return []
        return [self.field.to_struct(obj) for obj in objs]
class ReferenceListField(ListOfField):
    '''A sequence of "foreign-keys", parsed into a list of proxies.

    As with ReferenceField, each referenced object can be accessed as if
    it were embedded in the model. Serialized form:

        "security_groups": ["secgroupid1", "secgroupid2"],

    Parsed form:

    >>> obj.security_groups[1].name
    'Name of the secgroup'
    '''
    def __init__(self, target_model, lazy=True, *args, **kwargs):
        element_field = ReferenceField(target_model, lazy)
        super(ReferenceListField, self).__init__(
            element_field, *args, **kwargs)
class IpAddressField(fields.BaseField):
    '''Holds a netaddr.IPAddress, serialized as a plain address string:

        "ip": "10.0.0.12",
    '''
    types = (netaddr.IPAddress,)
    def parse_value(self, value):
        if value is None:
            return None
        return netaddr.IPAddress(value)
    def to_struct(self, obj):
        if obj is None:
            return None
        return str(obj)
class IpNetworkField(fields.BaseField):
    '''Holds a netaddr.IPNetwork, serialized in CIDR notation:

        "network": "10.0.0.0/24",
    '''
    types = (netaddr.IPNetwork,)
    def parse_value(self, value):
        if value is None:
            return None
        return netaddr.IPNetwork(value)
    def to_struct(self, obj):
        if obj is None:
            return None
        return str(obj)
# Timestamps are stored as plain floating-point numbers.
TimestampField = fields.FloatField
class MacAddressField(fields.BaseField):
    '''Holds a MAC address as a netaddr.EUI.

    Serialized using the expanded UNIX MAC format:

        "mac": "12:34:56:78:90:ab"
    '''
    types = (netaddr.EUI,)
    def parse_value(self, value):
        if value is None:
            return None
        return netaddr.EUI(value, dialect=netaddr.mac_unix_expanded)
    def to_struct(self, obj):
        if obj is None:
            return None
        # Force the dialect so str() renders in UNIX expanded form.
        obj.dialect = netaddr.mac_unix_expanded
        return str(obj)
class EnumField(fields.StringField):
    '''A string field restricted to a fixed set of allowed values:

    >>> class F(ModelBase):
    ...     f = EnumField(('a', 'b', 'c'))
    >>> F(f='a')  # OK
    >>> F(f='d')  # raises
    Traceback...
    ....
    ValidationError: ...
    '''
    types = six.string_types
    def __init__(self, values, *args, **kwargs):
        super(EnumField, self).__init__(*args, **kwargs)
        # The whitelist of acceptable values.
        self._valid_values = values
    def validate(self, value):
        super(EnumField, self).validate(value)
        if value is None:
            return
        if value not in self._valid_values:
            raise errors.ValidationError(
                _('{value} is not one of: [{valid_values}]').format(
                    value=value, valid_values=', '.join(self._valid_values)))
class EnumListField(fields.ListField):
    '''A list of strings where every entry must come from a fixed set.

    The allowed entries are supplied at field-creation time, as with
    EnumField.
    '''
    def __init__(self, values, *args, **kwargs):
        super(EnumListField, self).__init__(six.string_types, *args, **kwargs)
        # The whitelist of acceptable list entries.
        self._valid_values = values
    def validate(self, value):
        if self.required and not value:
            raise errors.ValidationError(_('Field is required!'))
        if value is None:
            return
        for elem in value:
            if elem in self._valid_values:
                continue
            raise errors.ValidationError(
                _('{value} is not one of: [{valid_values}]').format(
                    value=value,
                    valid_values=', '.join(self._valid_values)))
class DhcpOptsDictField(fields.BaseField):
    '''A mapping of DHCP option tag (int) to option value (string).

    Serialized form uses string keys; parsing converts them back to
    ints.
    '''
    types = (dict,)
    def parse_value(self, value):
        """Convert serialized {str: str} into {int: str}."""
        if value is not None:
            return {int(key): inner_val for key, inner_val in value.items()}
    def to_struct(self, obj):
        """Convert {int: str} into a serialization-friendly {str: str}."""
        if obj is not None:
            return {str(key): inner_val for key, inner_val in obj.items()}
    def validate(self, value):
        """Check every key is a valid DHCP tag and every value a string."""
        super(DhcpOptsDictField, self).validate(value)
        if not value:
            return
        for key, inner_val in value.items():
            if not dhcp.is_tag_valid(key):
                # Message typo fixed: 'vaild' -> 'valid'.
                raise errors.ValidationError(
                    _('Key {} is not a valid dhcp opt').format(key))
            if not isinstance(inner_val, six.string_types):
                raise errors.ValidationError(
                    _('Value {value} to key {key} is not a string').format(
                        key=key, value=inner_val))
    def get_default_value(self):
        # An absent field behaves as an empty mapping.
        return {}
class PortRange(object):
    """An inclusive [min, max] range of ports.

    Instances compare equal when both bounds match, and are hashable so
    they can be used as dict keys or set members.
    """
    def __init__(self, port_min, port_max):
        # Lower and upper bounds of the range.
        self.min = port_min
        self.max = port_max
    @classmethod
    def from_min_max(cls, port_min, port_max):
        """Build a PortRange, or return None when either bound is None."""
        if port_min is not None and port_max is not None:
            return cls(port_min, port_max)
    def __repr__(self):
        return 'PortRange(%r, %r)' % (self.min, self.max)
    def __eq__(self, other):
        # isinstance (rather than an exact type check) so subclasses can
        # participate; NotImplemented lets Python try the reflected op.
        if not isinstance(other, PortRange):
            return NotImplemented
        return (self.min, self.max) == (other.min, other.max)
    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __hash__(self):
        # Keep instances hashable alongside the custom __eq__.
        return hash((self.min, self.max))
class PortRangeField(fields.BaseField):
    """A field storing a PortRange, serialized as a [min, max] pair."""
    types = (PortRange,)
    def to_struct(self, value):
        if value is None or value == [None, None]:
            return None
        return [value.min, value.max]
    def parse_value(self, value):
        if value is None:
            return None
        if isinstance(value, PortRange):
            return value
        # Unpacking raises if the sequence does not hold exactly 2 items.
        port_min, port_max = value
        return PortRange(port_min, port_max)
class IpProto(fields.IntField):
    """An integer field holding an IP protocol number (0-255)."""
    def validate(self, value):
        """Raise ValidationError when value falls outside [0, 255]."""
        super(IpProto, self).validate(value)
        if value is None:
            return
        if value < 0 or value > 255:
            # Message wording fixed ('must to be' -> 'must be',
            # stray space before ')').
            raise errors.ValidationError(
                _('IP protocol value must be in the'
                  ' range [0,255] ({val} supplied)').format(val=value))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.