gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
#
# musiccube.py
#
# Copyright (C) 2015 Jeremiah J. Leonard
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import os
import shelve
from analyzer import Analyzer
from banshee import Banshee
from numbacube import NumbaCube
from progress import Progress
from matplotlib import pyplot, colors
from mpl_toolkits.mplot3d import Axes3D
from pylab import figure
class MusicCube:
    """Map a Banshee music library onto a 3-D self-organizing cube.

    Track features come from Analyzer, the track list from Banshee, and
    the map itself from NumbaCube; analyzed features are cached in a
    shelve database stored inside the music folder.
    """
    # path and file name of music database
    DB_PATH = ".musiccube"
    DB_NAME = "musiccube.dbm"

    def __init__(self):
        Progress("Total Time", 1).display()
        # get music path from Banshee
        self.banshee = Banshee()
        self.music_path = self.banshee.library_source()
        # get full path of music database
        db_path = os.path.join(self.music_path, self.DB_PATH)
        db_file = os.path.join(db_path, self.DB_NAME)
        # create path to music database
        if not os.path.exists(db_path):
            os.makedirs(db_path)
        # get and update music data
        # writeback=True so in-place mutations are persisted on sync()/close()
        self.music_shelve = shelve.open(db_file, writeback=True)
        self.update_music_data()
        # transform columns to be between 0 and 1
        self.scale_music_data()
        # calculate number of nodes per edge
        # cube root, so the cube has roughly one node per analyzed song
        cube_edge = int(len(self.music_data) ** (1 / 3.0))
        # create or load music cube
        self.numba_cube = NumbaCube(
            edge_length=cube_edge,
            node_weights=Analyzer.FEATURES_LENGTH,
            npy_path=db_path,
            random_seed=1)

    def __del__(self):
        # NOTE(review): relying on __del__ for cleanup is fragile -- it may
        # not run at interpreter shutdown; an explicit close() would be safer.
        self.music_shelve.close()

    def get_paths(self):
        """Return the file paths of all analyzed songs (shelve keys)."""
        return self.music_shelve.keys()

    def get_features(self, song):
        """Return the column-scaled feature vector for *song*."""
        # calculate scaled song features
        song_data = np.array(self.music_shelve[song])
        # normalize by column
        song_data = self.scale_by_column(song_data)
        return song_data

    def get_position(self, song):
        """Return the cube coordinates of *song*."""
        # return cube coordinates of song
        song_features = self.get_features(song)
        return self.numba_cube.get_position(song_features)

    def update_music_data(self):
        """Sync the shelve cache with Banshee and build the feature matrix."""
        analyzer = Analyzer()
        music_list = self.banshee.get_tracks()
        # delete previously analyzed songs no longer existing in Banshee
        for mp3 in self.music_shelve:
            if mp3 not in music_list:
                del self.music_shelve[mp3]
                self.music_shelve.sync()
        song_count = len(music_list)
        progress = Progress("Analyzing Songs", song_count)
        # calculate and save features of new songs
        for mp3 in music_list:
            if mp3 not in self.music_shelve:
                features = analyzer.compute_features(mp3)
                # skip tracks whose analysis produced invalid features
                if analyzer.valid_features(features):
                    self.music_shelve[mp3] = features
                    self.music_shelve.sync()
            progress.display()
        # convert music data to array
        # (Python 2: .values() returns a list, so this builds a 2-D array)
        self.music_data = np.array(self.music_shelve.values())

    def update_banshee(self):
        """Compute every song's cube position and push them to Banshee."""
        # counter maps position -> number of songs there (used by plot())
        self.counter = {}
        positions = {}
        paths = self.get_paths()
        song_count = len(paths)
        progress = Progress("Updating Banshee", song_count)
        for song in paths:
            position = self.get_position(song)
            positions[song] = position
            # count song positions for plotting
            if position not in self.counter:
                self.counter[position] = 1
            else:
                self.counter[position] += 1
            progress.display()
        # update song positions in Banshee
        self.banshee.update_tracks(positions)

    def scale_music_data(self):
        # scale music data column wise; maxs/rng are kept so that single
        # songs can later be scaled consistently in get_features()
        mins = np.min(self.music_data, axis=0)
        self.maxs = np.max(self.music_data, axis=0)
        self.rng = self.maxs - mins
        self.music_data = self.scale_by_column(self.music_data)

    def scale_by_column(self, data, high=1.0, low=0.0):
        """Linearly rescale *data* column-wise into [low, high]."""
        return high - (((high - low) * (self.maxs - data)) / self.rng)

    def train_numbacube(self):
        """Train the cube on the scaled feature matrix and persist it."""
        self.numba_cube.train(self.music_data)
        self.numba_cube.save()

    def plot(self):
        """Show a 3-D scatter plot: one marker per occupied cube node,
        colored and sized by the number of songs at that node."""
        # create and show scatter plot
        ax = Axes3D(figure())
        # transform to array
        data = np.array([(key[0], key[1], key[2], val)
                         for key, val in self.counter.items()])
        # sort by position counter
        data = data[np.argsort(data[:, 3])]
        # minimum and maximum counter
        # NOTE(review): these names shadow the builtins min/max
        min = np.min(data[:, 3])
        max = np.max(data[:, 3])
        # setup color mapping
        colormap = pyplot.cm.ScalarMappable(
            norm=colors.Normalize(vmin=min, vmax=max),
            cmap=pyplot.cm.get_cmap('RdYlBu_r'))
        # initialize loop
        group = 0
        loops = len(data)
        for ix in range(loops):
            # determine current group
            count = data[ix][3]
            # group header: start fresh coordinate lists for a new count value
            if not count == group:
                xs = []
                ys = []
                zs = []
            # group body
            xs.append(data[ix][0])
            ys.append(data[ix][1])
            zs.append(data[ix][2])
            group = count
            # group footer
            # last item or last item of group
            if (ix == loops - 1) or not (data[ix + 1][3] == group):
                color = colormap.to_rgba(group)
                size = 10 + group * 10
                ax.scatter(xs, ys, zs, c=color, s=size)
        ax.set_title("MusicCube")
        pyplot.show()
if __name__ == '__main__':
    # build/refresh the library model, train the cube, push positions
    # back to Banshee, then show the 3-D scatter plot (Python 2 script)
    music_cube = MusicCube()
    music_cube.train_numbacube()
    music_cube.update_banshee()
    music_cube.plot()
    print "Done."
| |
import copy
import floppyforms as forms
from crispy_forms.bootstrap import Tab, TabHolder
from crispy_forms.layout import Field, HTML, Layout, Fieldset
from django.forms.models import modelform_factory
from django.utils.translation import ugettext_lazy as _
from horizon.utils.memoized import memoized
from django import forms as django_forms
from horizon_contrib.common import get_class
from leonardo.forms import SelfHandlingModelForm, SelfHandlingForm
from django.utils.text import slugify
from ..models import Page, PageTheme, PageColorScheme
class SwitchableFormFieldMixin(object):
    """Helpers producing widget attrs for Horizon's switched/switchable JS."""

    def get_switched_form_field_attrs(self, prefix, input_type, name):
        """Build the attribute dict for a field switched on ``prefix``."""
        switch_key = 'data-%sfield-%s' % (prefix, input_type)
        return {
            'class': 'switched',
            'data-switch-on': prefix + 'field',
            switch_key: name,
        }

    def switchable_field_attrs(self):
        """Attribute dict marking the controlling field as switchable."""
        return {'class': 'switchable', 'data-slug': 'switchablefield'}
class PageColorSchemeSwitchableFormMixin(SwitchableFormFieldMixin):
    """Adds one color-scheme choice field per available theme.

    The fields are named ``theme__<theme id>`` and the theme widget is
    marked switchable, so client-side JS can show only the color-scheme
    field matching the currently selected theme.
    """

    def init_color_scheme_switch(self, color_scheme=None):
        """Create the per-theme fields; return the list of their names."""
        color_scheme_fields = []
        for theme in self.fields['theme'].queryset:
            name = 'theme__%s' % theme.id
            # NOTE(review): ``attributes`` is built but never attached to the
            # field's widget -- looks like an oversight; confirm intent.
            attributes = self.get_switched_form_field_attrs(
                'switchable', '%s' % theme.id, ('Color Scheme'))
            field = django_forms.ModelChoiceField(label=_('Color Scheme'),
                                                  queryset=theme.templates.all(),
                                                  required=False)
            # inital for color scheme: explicit argument wins, then the
            # parent page's scheme, then the instance's, then the first one
            if color_scheme and theme.templates.filter(id=color_scheme.id).exists():
                field.initial = color_scheme
            elif 'parent' in self.fields and self.fields['parent'].initial:
                field.initial = self.fields['parent'].initial.color_scheme
            elif self.instance and hasattr(self.instance, 'color_scheme'):
                field.initial = self.instance.color_scheme
            else:
                field.initial = theme.templates.first()
            self.fields[name] = field
            color_scheme_fields.append(name)
        # update theme widget attributes
        self.fields['theme'].widget.attrs = self.switchable_field_attrs()
        return color_scheme_fields
class PageCreateForm(PageColorSchemeSwitchableFormMixin, SelfHandlingModelForm):
    """Create form for ``Page`` with per-theme switchable color schemes."""

    slug = forms.SlugField(required=False, initial=None)

    class Meta:
        model = Page
        widgets = {
            'parent': forms.widgets.HiddenInput,
        }
        exclude = tuple()

    def clean_slug(self):
        """Slugify the title when no slug was provided."""
        slug = self.cleaned_data.get('slug', None)
        if slug is None or len(slug) == 0:
            slug = slugify(self.cleaned_data['title'])
        return slug

    def __init__(self, *args, **kwargs):
        parent = kwargs.pop('parent', None)
        super(PageCreateForm, self).__init__(*args, **kwargs)
        self.fields['parent'].initial = parent
        # fix: ``initial`` is an optional kwarg -- the previous direct
        # ``kwargs['initial']`` raised KeyError when it was omitted
        initial = kwargs.get('initial') or {}
        color_scheme_fields = self.init_color_scheme_switch(
            color_scheme=initial.get('color_scheme', None))
        self.helper.layout = Layout(
            TabHolder(
                Tab(_('Main'),
                    'title',
                    'language',
                    'translation_of',
                    'site',
                    css_id='page-main'
                    ),
                Tab(_('Navigation'),
                    'in_navigation', 'parent', 'slug', 'override_url', 'redirect_to',
                    'symlinked_page'
                    ),
                Tab(_('Heading'),
                    '_content_title', '_page_title',
                    css_id='page-heading'
                    ),
                Tab(_('Publication'),
                    'active', 'featured', 'publication_date', 'publication_end_date',
                    ),
                Tab(_('Theme'),
                    'template_key', 'layout', Fieldset(
                        'Themes', 'theme', *color_scheme_fields),
                    css_id='page-theme-settings'
                    ),
            )
        )
        # the real value is derived in clean() from the per-theme fields
        self.fields['color_scheme'].required = False

    def clean(self):
        """Copy the selected theme's color scheme into ``color_scheme``."""
        cleaned = super(PageCreateForm, self).clean()
        # fix: ``theme`` is absent from cleaned data when its own validation
        # failed -- the previous direct indexing raised KeyError instead of
        # letting the field error surface
        theme = cleaned.get('theme')
        if theme is not None:
            cleaned['color_scheme'] = self.cleaned_data.get(
                'theme__%s' % theme.id)
        return cleaned
class PageUpdateForm(PageColorSchemeSwitchableFormMixin, SelfHandlingModelForm):
    """Update form for ``Page``; adds a read-only dimensions tab."""

    class Meta:
        model = Page
        widgets = {
            'parent': forms.widgets.HiddenInput,
            'override_url': forms.widgets.HiddenInput,
            'publication_date': forms.widgets.DateInput,
        }
        exclude = tuple()

    def clean(self):
        """Copy the selected theme's color scheme into ``color_scheme``."""
        cleaned = super(PageUpdateForm, self).clean()
        # fix: ``theme`` is absent from cleaned data when its own validation
        # failed -- the previous direct indexing raised KeyError instead of
        # letting the field error surface
        theme = cleaned.get('theme')
        if theme is not None:
            cleaned['color_scheme'] = self.cleaned_data.get(
                'theme__%s' % theme.id)
        return cleaned

    def __init__(self, *args, **kwargs):
        request = kwargs.pop('request', None)
        super(PageUpdateForm, self).__init__(*args, **kwargs)
        color_scheme_fields = self.init_color_scheme_switch()
        self.helper.layout = Layout(
            TabHolder(
                Tab(_('Main'),
                    'title',
                    'language',
                    'translation_of',
                    'site',
                    css_id='page-main'
                    ),
                Tab(_('Heading'),
                    '_content_title', '_page_title',
                    css_id='page-heading'
                    ),
                Tab(_('Publication'),
                    'active', 'featured', 'publication_date', 'publication_end_date',
                    ),
                Tab(_('Navigation'),
                    'in_navigation', 'parent', 'slug', 'override_url', 'redirect_to',
                    'symlinked_page'
                    ),
                Tab(_('Theme'),
                    'template_key', 'layout', Fieldset(
                        'Themes', 'theme', *color_scheme_fields),
                    css_id='page-theme-settings'
                    ),
            )
        )
        if request:
            # render the dimensions table with a POST-free copy of the
            # request so the table does not try to process form data
            _request = copy.copy(request)
            _request.POST = {}
            if kwargs.get('instance', None):
                page = kwargs['instance']
                from .tables import PageDimensionTable
                table = PageDimensionTable(
                    _request, page=page, data=page.dimensions,
                    needs_form_wrapper=False)
                dimensions = Tab(_('Dimensions'),
                                 HTML(
                                     table.render()),
                                 css_id='page-dimensions'
                                 )
                self.helper.layout[0].append(dimensions)
        # the real value is derived in clean() from the per-theme fields
        self.fields['color_scheme'].required = False
class PageDeleteForm(SelfHandlingForm):
    """Self-handling confirmation form whose handle() is a no-op."""

    def handle(self, request, data):
        # nothing to do here -- presumably the deletion itself is performed
        # outside this form (TODO confirm against the calling view)
        pass
| |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 Service action implementations"""
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common
LOG = logging.getLogger(__name__)
class CreateService(command.ShowOne):
    """Create a new identity service entry and display it."""

    _description = _("Create new service")

    def get_parser(self, prog_name):
        """Register arguments: positional type, optional name/description,
        and a mutually exclusive --enable/--disable pair."""
        parser = super(CreateService, self).get_parser(prog_name)
        parser.add_argument(
            'type',
            metavar='<type>',
            help=_('New service type (compute, image, identity, volume, etc)'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('New service name'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New service description'),
        )
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--enable',
            action='store_true',
            help=_('Enable service (default)'),
        )
        enable_group.add_argument(
            '--disable',
            action='store_true',
            help=_('Disable service'),
        )
        return parser

    def take_action(self, parsed_args):
        """Create the service; return (column names, values) for display."""
        identity_client = self.app.client_manager.identity
        # enabled unless --disable was given
        service = identity_client.services.create(
            name=parsed_args.name,
            type=parsed_args.type,
            description=parsed_args.description,
            enabled=not parsed_args.disable,
        )
        info = service._info
        info.pop('links')
        return zip(*sorted(info.items()))
class DeleteService(command.Command):
    """Delete one or more identity services; continues past failures and
    reports an aggregate error at the end."""

    _description = _("Delete service(s)")

    def get_parser(self, prog_name):
        parser = super(DeleteService, self).get_parser(prog_name)
        parser.add_argument(
            'service',
            metavar='<service>',
            nargs='+',
            help=_('Service(s) to delete (type, name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        identity_client = self.app.client_manager.identity
        result = 0
        for i in parsed_args.service:
            try:
                service = common.find_service(identity_client, i)
                identity_client.services.delete(service.id)
            except Exception as e:
                result += 1
                # fix: the message previously said "consumer" (copy/paste
                # from another command) -- this command deletes services
                LOG.error(_("Failed to delete service with type, "
                            "name or ID '%(service)s': %(e)s"),
                          {'service': i, 'e': e})
        if result > 0:
            total = len(parsed_args.service)
            msg = (_("%(result)s of %(total)s services failed "
                     "to delete.") % {'result': result, 'total': total})
            raise exceptions.CommandError(msg)
class ListService(command.Lister):
    """List identity services, optionally with extra columns."""

    _description = _("List services")

    def get_parser(self, prog_name):
        parser = super(ListService, self).get_parser(prog_name)
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        return parser

    def take_action(self, parsed_args):
        """Return column headers plus a lazy row generator."""
        columns = ('ID', 'Name', 'Type')
        if parsed_args.long:
            columns += ('Description', 'Enabled')
        services = self.app.client_manager.identity.services.list()
        rows = (utils.get_item_properties(s, columns) for s in services)
        return (columns, rows)
class SetService(command.Command):
    """Update properties of an existing identity service."""

    _description = _("Set service properties")

    def get_parser(self, prog_name):
        parser = super(SetService, self).get_parser(prog_name)
        parser.add_argument(
            'service',
            metavar='<service>',
            help=_('Service to modify (type, name or ID)'),
        )
        parser.add_argument(
            '--type',
            metavar='<type>',
            help=_('New service type (compute, image, identity, volume, etc)'),
        )
        parser.add_argument(
            '--name',
            metavar='<service-name>',
            help=_('New service name'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New service description'),
        )
        enable_group = parser.add_mutually_exclusive_group()
        enable_group.add_argument(
            '--enable',
            action='store_true',
            help=_('Enable service'),
        )
        enable_group.add_argument(
            '--disable',
            action='store_true',
            help=_('Disable service'),
        )
        return parser

    def take_action(self, parsed_args):
        """Collect only the attributes the user supplied and update."""
        identity_client = self.app.client_manager.identity
        service = common.find_service(identity_client,
                                      parsed_args.service)
        kwargs = {}
        # string attributes: include only when a truthy value was given
        for attr in ('type', 'name', 'description'):
            value = getattr(parsed_args, attr)
            if value:
                kwargs[attr] = value
        # enable/disable are mutually exclusive store_true flags
        if parsed_args.enable:
            kwargs['enabled'] = True
        if parsed_args.disable:
            kwargs['enabled'] = False
        identity_client.services.update(service.id, **kwargs)
class ShowService(command.ShowOne):
    """Display the details of a single identity service."""

    _description = _("Display service details")

    def get_parser(self, prog_name):
        parser = super(ShowService, self).get_parser(prog_name)
        parser.add_argument(
            'service',
            metavar='<service>',
            help=_('Service to display (type, name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the service and return its properties as two columns."""
        identity_client = self.app.client_manager.identity
        service = common.find_service(identity_client, parsed_args.service)
        data = service._info
        # 'links' is metadata, not a displayable property
        del data['links']
        return zip(*sorted(data.items()))
| |
import py
from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.jit.backend.llsupport import symbolic, support
from rpython.jit.metainterp.history import AbstractDescr, getkind, FLOAT, INT
from rpython.jit.metainterp import history
from rpython.jit.codewriter import heaptracker, longlong
from rpython.jit.codewriter.longlong import is_longlong
from rpython.jit.metainterp.optimizeopt import intbounds
from rpython.rtyper import rclass
class GcCache(object):
    """Caches of descr objects, keyed by the lltype inputs they were
    built from, so each STRUCT/ARRAY/field/call signature gets exactly
    one descr instance."""

    def __init__(self, translate_support_code, rtyper=None):
        self.translate_support_code = translate_support_code
        self.rtyper = rtyper
        # one memo dict per descr family
        self._cache_size = {}
        self._cache_field = {}
        self._cache_array = {}
        self._cache_arraylen = {}
        self._cache_call = {}
        self._cache_interiorfield = {}

    def setup_descrs(self):
        """Assign a global descr_index to every cached descr and return
        them all as one flat list."""
        all_descrs = []
        for k, v in self._cache_size.iteritems():
            v.descr_index = len(all_descrs)
            all_descrs.append(v)
        for k, v in self._cache_field.iteritems():
            # the field cache is two-level: {STRUCT: {fieldname: descr}}
            for k1, v1 in v.iteritems():
                v1.descr_index = len(all_descrs)
                all_descrs.append(v1)
        for k, v in self._cache_array.iteritems():
            v.descr_index = len(all_descrs)
            all_descrs.append(v)
        for k, v in self._cache_arraylen.iteritems():
            v.descr_index = len(all_descrs)
            all_descrs.append(v)
        for k, v in self._cache_call.iteritems():
            v.descr_index = len(all_descrs)
            all_descrs.append(v)
        for k, v in self._cache_interiorfield.iteritems():
            v.descr_index = len(all_descrs)
            all_descrs.append(v)
        # descr_index must fit in 15 bits
        assert len(all_descrs) < 2**15
        return all_descrs

    def init_size_descr(self, STRUCT, sizedescr):
        # hook for GC-specific subclasses; default does nothing
        pass

    def init_array_descr(self, ARRAY, arraydescr):
        # only GcArrays, or GcStructs with an inlined array field, qualify
        assert (isinstance(ARRAY, lltype.GcArray) or
                isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld)
# ____________________________________________________________
# SizeDescrs
class SizeDescr(AbstractDescr):
    """Describes a GcStruct: its size, fields, vtable and immutability."""
    size = 0     # help translation
    tid = llop.combine_ushort(lltype.Signed, 0, 0)
    vtable = lltype.nullptr(rclass.OBJECT_VTABLE)
    immutable_flag = False

    def __init__(self, size, gc_fielddescrs=None, all_fielddescrs=None,
                 vtable=lltype.nullptr(rclass.OBJECT_VTABLE),
                 immutable_flag=False):
        assert lltype.typeOf(vtable) == lltype.Ptr(rclass.OBJECT_VTABLE)
        self.size = size
        self.gc_fielddescrs = gc_fielddescrs
        self.all_fielddescrs = all_fielddescrs
        self.vtable = vtable
        self.immutable_flag = immutable_flag

    def get_all_fielddescrs(self):
        return self.all_fielddescrs

    def repr_of_descr(self):
        return '<SizeDescr %s>' % self.size

    def is_object(self):
        # a struct with a vtable is an RPython instance
        return bool(self.vtable)

    def is_valid_class_for(self, struct):
        """Check whether *struct*'s class is self's class or a subclass."""
        objptr = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct)
        cls = llmemory.cast_adr_to_ptr(
            heaptracker.int2adr(self.get_vtable()),
            lltype.Ptr(rclass.OBJECT_VTABLE))
        # this first comparison is necessary, since we want to make sure
        # that vtable for JitVirtualRef is the same without actually reading
        # fields
        return objptr.typeptr == cls or rclass.ll_isinstance(objptr, cls)

    def is_immutable(self):
        return self.immutable_flag

    def get_vtable(self):
        # vtable pointer encoded as an integer (address)
        return heaptracker.adr2int(llmemory.cast_ptr_to_adr(self.vtable))

    def get_type_id(self):
        assert self.tid
        return self.tid
def get_size_descr(gccache, STRUCT, vtable=lltype.nullptr(rclass.OBJECT_VTABLE)):
    """Return the cached SizeDescr for STRUCT, building it on first use."""
    cache = gccache._cache_size
    assert not isinstance(vtable, bool)
    try:
        return cache[STRUCT]
    except KeyError:
        size = symbolic.get_size(STRUCT, gccache.translate_support_code)
        immutable_flag = heaptracker.is_immutable_struct(STRUCT)
        # the vtable argument must agree with whether STRUCT has one
        if vtable:
            assert heaptracker.has_gcstruct_a_vtable(STRUCT)
        else:
            assert not heaptracker.has_gcstruct_a_vtable(STRUCT)
        sizedescr = SizeDescr(size, vtable=vtable,
                              immutable_flag=immutable_flag)
        gccache.init_size_descr(STRUCT, sizedescr)
        # cache before building fielddescrs: they recurse back into
        # get_size_descr for the parent_descr
        cache[STRUCT] = sizedescr
        # XXX do we really need gc_fielddescrs if we also have
        # all_fielddescrs and can ask is_pointer_field() on them?
        gc_fielddescrs = heaptracker.gc_fielddescrs(gccache, STRUCT)
        sizedescr.gc_fielddescrs = gc_fielddescrs
        all_fielddescrs = heaptracker.all_fielddescrs(gccache, STRUCT)
        sizedescr.all_fielddescrs = all_fielddescrs
        return sizedescr
# ____________________________________________________________
# FieldDescrs
# One-character type codes stored in the 'flag' attribute of
# FieldDescr/ArrayDescr and in CallDescr.result_flag.
FLAG_POINTER = 'P'
FLAG_FLOAT = 'F'
FLAG_UNSIGNED = 'U'
FLAG_SIGNED = 'S'
FLAG_STRUCT = 'X'
FLAG_VOID = 'V'
class ArrayOrFieldDescr(AbstractDescr):
    """Common base of ArrayDescr and FieldDescr; carries optional vinfo."""
    vinfo = None

    def get_vinfo(self):
        return self.vinfo
class FieldDescr(ArrayOrFieldDescr):
    """Describes one field of a struct (name, offset, size, type flag)."""
    name = ''
    offset = 0     # help translation
    field_size = 0
    flag = '\x00'

    def __init__(self, name, offset, field_size, flag, index_in_parent=0,
                 is_pure=False):
        self.name = name
        self.offset = offset
        self.field_size = field_size
        self.flag = flag
        self.index = index_in_parent
        self._is_pure = is_pure

    def is_always_pure(self):
        return self._is_pure

    def __repr__(self):
        return 'FieldDescr<%s>' % (self.name,)

    def assert_correct_type(self, struct):
        # similar to cpu.protect_speculative_field(), but works also
        # if supports_guard_gc_type is false (and is allowed to crash).
        if self.parent_descr.is_object():
            assert self.parent_descr.is_valid_class_for(struct)
        else:
            pass

    def is_pointer_field(self):
        return self.flag == FLAG_POINTER

    def is_float_field(self):
        return self.flag == FLAG_FLOAT

    def is_field_signed(self):
        return self.flag == FLAG_SIGNED

    def is_integer_bounded(self):
        # sub-word integer fields have statically known value bounds
        return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \
            and self.field_size < symbolic.WORD

    def get_integer_min(self):
        if self.flag == FLAG_UNSIGNED:
            return intbounds.get_integer_min(True, self.field_size)
        elif self.flag == FLAG_SIGNED:
            return intbounds.get_integer_min(False, self.field_size)
        assert False

    def get_integer_max(self):
        if self.flag == FLAG_UNSIGNED:
            return intbounds.get_integer_max(True, self.field_size)
        elif self.flag == FLAG_SIGNED:
            return intbounds.get_integer_max(False, self.field_size)
        assert False

    def sort_key(self):
        return self.offset

    def repr_of_descr(self):
        ispure = " pure" if self._is_pure else ""
        return '<Field%s %s %s%s>' % (self.flag, self.name, self.offset, ispure)

    def get_parent_descr(self):
        # parent_descr (a SizeDescr) is attached by get_field_descr()
        return self.parent_descr

    def get_index(self):
        return self.index
def get_field_descr(gccache, STRUCT, fieldname):
    """Return the cached FieldDescr for STRUCT.fieldname, building it
    (and its parent SizeDescr) on first use."""
    cache = gccache._cache_field
    try:
        return cache[STRUCT][fieldname]
    except KeyError:
        offset, size = symbolic.get_field_token(STRUCT, fieldname,
                                                gccache.translate_support_code)
        FIELDTYPE = getattr(STRUCT, fieldname)
        flag = get_type_flag(FIELDTYPE)
        name = '%s.%s' % (STRUCT._name, fieldname)
        index_in_parent = heaptracker.get_fielddescr_index_in(STRUCT, fieldname)
        # _immutable_field may return a non-boolean specifier; anything
        # other than False counts as pure here
        is_pure = STRUCT._immutable_field(fieldname) != False
        fielddescr = FieldDescr(name, offset, size, flag, index_in_parent,
                                is_pure)
        # cache before building parent_descr (get_size_descr recurses
        # back into get_field_descr for the struct's fields)
        cachedict = cache.setdefault(STRUCT, {})
        cachedict[fieldname] = fielddescr
        if STRUCT is rclass.OBJECT:
            vtable = lltype.nullptr(rclass.OBJECT_VTABLE)
        else:
            vtable = heaptracker.get_vtable_for_gcstruct(gccache, STRUCT)
        fielddescr.parent_descr = get_size_descr(gccache, STRUCT, vtable)
        return fielddescr
def get_type_flag(TYPE):
    """Classify an lltype into one of the one-character FLAG_* codes."""
    if isinstance(TYPE, lltype.Ptr):
        # gc pointers get their own code; raw pointers are machine words
        return FLAG_POINTER if TYPE.TO._gckind == 'gc' else FLAG_UNSIGNED
    if isinstance(TYPE, lltype.Struct):
        return FLAG_STRUCT
    if TYPE is lltype.Float or is_longlong(TYPE):
        return FLAG_FLOAT
    # a number type is signed if casting -1 into it round-trips
    signed = (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number)
              and rffi.cast(TYPE, -1) == -1)
    return FLAG_SIGNED if signed else FLAG_UNSIGNED
def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT):
    """Return the cached FieldDescr for the hidden length field of an
    array (or of a struct with an inlined array)."""
    cache = gccache._cache_arraylen
    try:
        return cache[ARRAY_OR_STRUCT]
    except KeyError:
        tsc = gccache.translate_support_code
        # only the length-field offset is needed here
        (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
        size = symbolic.get_size(lltype.Signed, tsc)
        result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed))
        # the length field has no meaningful parent SizeDescr
        result.parent_descr = None
        cache[ARRAY_OR_STRUCT] = result
        return result
# ____________________________________________________________
# ArrayDescrs
class ArrayDescr(ArrayOrFieldDescr):
    """Describes an array: header size, item size, length field, flag."""
    tid = 0
    basesize = 0       # workaround for the annotator
    itemsize = 0
    lendescr = None
    flag = '\x00'
    vinfo = None
    all_interiorfielddescrs = None
    concrete_type = '\x00'

    def __init__(self, basesize, itemsize, lendescr, flag, is_pure=False, concrete_type='\x00'):
        self.basesize = basesize    # this includes +1 for STR
        self.itemsize = itemsize
        self.lendescr = lendescr    # or None, if no length
        self.flag = flag
        self._is_pure = is_pure
        self.concrete_type = concrete_type

    def get_all_fielddescrs(self):
        return self.all_interiorfielddescrs

    def is_always_pure(self):
        return self._is_pure

    def getconcrete_type(self):
        return self.concrete_type

    def is_array_of_primitives(self):
        return self.flag == FLAG_FLOAT or \
               self.flag == FLAG_SIGNED or \
               self.flag == FLAG_UNSIGNED

    def is_array_of_pointers(self):
        return self.flag == FLAG_POINTER

    def is_array_of_floats(self):
        return self.flag == FLAG_FLOAT

    def is_item_signed(self):
        return self.flag == FLAG_SIGNED

    def get_item_size_in_bytes(self):
        return self.itemsize

    def is_array_of_structs(self):
        return self.flag == FLAG_STRUCT

    def is_item_integer_bounded(self):
        # sub-word integer items have statically known value bounds
        return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \
            and self.itemsize < symbolic.WORD

    def get_item_integer_min(self):
        if self.flag == FLAG_UNSIGNED:
            return intbounds.get_integer_min(True, self.itemsize)
        elif self.flag == FLAG_SIGNED:
            return intbounds.get_integer_min(False, self.itemsize)
        assert False

    def get_item_integer_max(self):
        if self.flag == FLAG_UNSIGNED:
            return intbounds.get_integer_max(True, self.itemsize)
        elif self.flag == FLAG_SIGNED:
            return intbounds.get_integer_max(False, self.itemsize)
        assert False

    def get_type_id(self):
        assert self.tid
        return self.tid

    def repr_of_descr(self):
        return '<Array%s %s>' % (self.flag, self.itemsize)
def get_array_descr(gccache, ARRAY_OR_STRUCT):
    """Return the cached ArrayDescr for an Array, or for a GcStruct with
    an inlined array field, building it on first use."""
    cache = gccache._cache_array
    try:
        return cache[ARRAY_OR_STRUCT]
    except KeyError:
        tsc = gccache.translate_support_code
        basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
        if isinstance(ARRAY_OR_STRUCT, lltype.Array):
            ARRAY_INSIDE = ARRAY_OR_STRUCT
        else:
            # struct with an inlined array: look up the actual array type
            ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld]
        if ARRAY_INSIDE._hints.get('nolength', False):
            lendescr = None
        else:
            lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT)
        flag = get_type_flag(ARRAY_INSIDE.OF)
        is_pure = bool(ARRAY_INSIDE._immutable_field(None))
        arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag, is_pure)
        if ARRAY_INSIDE.OF is lltype.SingleFloat or \
           ARRAY_INSIDE.OF is lltype.Float:
            # it would be better to set the flag as FLOAT_TYPE
            # for single float -> leads to problems
            arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag, is_pure, concrete_type='f')
        # cache before building interior descrs (they recurse back here)
        cache[ARRAY_OR_STRUCT] = arraydescr
        if isinstance(ARRAY_INSIDE.OF, lltype.Struct):
            descrs = heaptracker.all_interiorfielddescrs(gccache,
                ARRAY_INSIDE, get_field_descr=get_interiorfield_descr)
            arraydescr.all_interiorfielddescrs = descrs
        if ARRAY_OR_STRUCT._gckind == 'gc':
            gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr)
        return arraydescr
# ____________________________________________________________
# InteriorFieldDescr
class InteriorFieldDescr(AbstractDescr):
    """Describes one field of a struct stored inline inside an array;
    mostly delegates to the wrapped field descr."""
    arraydescr = ArrayDescr(0, 0, None, '\x00')     # workaround for the annotator
    fielddescr = FieldDescr('', 0, 0, '\x00')

    def __init__(self, arraydescr, fielddescr):
        # only arrays of structs have interior fields
        assert arraydescr.flag == FLAG_STRUCT
        self.arraydescr = arraydescr
        self.fielddescr = fielddescr

    def get_index(self):
        return self.fielddescr.get_index()

    def get_arraydescr(self):
        return self.arraydescr

    def get_field_descr(self):
        return self.fielddescr

    def sort_key(self):
        return self.fielddescr.sort_key()

    def is_pointer_field(self):
        return self.fielddescr.is_pointer_field()

    def is_float_field(self):
        return self.fielddescr.is_float_field()

    def is_integer_bounded(self):
        return self.fielddescr.is_integer_bounded()

    def get_integer_min(self):
        return self.fielddescr.get_integer_min()

    def get_integer_max(self):
        return self.fielddescr.get_integer_max()

    def repr_of_descr(self):
        return '<InteriorFieldDescr %s>' % self.fielddescr.repr_of_descr()
def get_interiorfield_descr(gc_ll_descr, ARRAY, name, arrayfieldname=None):
    """Return the cached InteriorFieldDescr for field *name* of the
    structs stored in ARRAY."""
    # can be used either with a GcArray of Structs, or with a GcStruct
    # containing an inlined GcArray of Structs (then arrayfieldname != None).
    cache = gc_ll_descr._cache_interiorfield
    try:
        return cache[(ARRAY, name, arrayfieldname)]
    except KeyError:
        arraydescr = get_array_descr(gc_ll_descr, ARRAY)
        if arrayfieldname is None:
            REALARRAY = ARRAY
        else:
            REALARRAY = getattr(ARRAY, arrayfieldname)
        fielddescr = get_field_descr(gc_ll_descr, REALARRAY.OF, name)
        descr = InteriorFieldDescr(arraydescr, fielddescr)
        cache[(ARRAY, name, arrayfieldname)] = descr
        return descr
# ____________________________________________________________
# CallDescrs
# Placeholder call stubs installed on every CallDescr before
# create_call_stub() generates the real one; one per return category.
def _missing_call_stub_i(func, args_i, args_r, args_f):
    return 0

def _missing_call_stub_r(func, args_i, args_r, args_f):
    return lltype.nullptr(llmemory.GCREF.TO)

def _missing_call_stub_f(func, args_i, args_r, args_f):
    return longlong.ZEROF
class CallDescr(AbstractDescr):
arg_classes = '' # <-- annotation hack
result_type = '\x00'
result_flag = '\x00'
ffi_flags = 1
def __init__(self, arg_classes, result_type, result_signed, result_size,
extrainfo=None, ffi_flags=1):
"""
'arg_classes' is a string of characters, one per argument:
'i', 'r', 'f', 'L', 'S'
'result_type' is one character from the same list or 'v'
'result_signed' is a boolean True/False
"""
self.arg_classes = arg_classes
self.result_type = result_type
self.result_size = result_size
self.extrainfo = extrainfo
self.ffi_flags = ffi_flags
self.call_stub_i = _missing_call_stub_i
self.call_stub_r = _missing_call_stub_r
self.call_stub_f = _missing_call_stub_f
# NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which
# makes sense on Windows as it's the one for all the C functions
# we are compiling together with the JIT. On non-Windows platforms
# it is just ignored anyway.
if result_type == 'v':
result_flag = FLAG_VOID
elif result_type == 'i':
if result_signed:
result_flag = FLAG_SIGNED
else:
result_flag = FLAG_UNSIGNED
elif result_type == history.REF:
result_flag = FLAG_POINTER
elif result_type == history.FLOAT or result_type == 'L':
result_flag = FLAG_FLOAT
elif result_type == 'S':
result_flag = FLAG_UNSIGNED
else:
raise NotImplementedError("result_type = '%s'" % (result_type,))
self.result_flag = result_flag
def __repr__(self):
res = 'CallDescr(%s)' % (self.arg_classes,)
extraeffect = getattr(self.extrainfo, 'extraeffect', None)
if extraeffect is not None:
res += ' EF=%r' % extraeffect
oopspecindex = getattr(self.extrainfo, 'oopspecindex', 0)
if oopspecindex:
from rpython.jit.codewriter.effectinfo import EffectInfo
for key, value in EffectInfo.__dict__.items():
if key.startswith('OS_') and value == oopspecindex:
break
else:
key = 'oopspecindex=%r' % oopspecindex
res += ' ' + key
return '<%s>' % res
def get_extra_info(self):
return self.extrainfo
def get_ffi_flags(self):
return self.ffi_flags
def get_call_conv(self):
from rpython.rlib.clibffi import get_call_conv
return get_call_conv(self.ffi_flags, True)
def get_arg_types(self):
return self.arg_classes
def get_result_type(self):
return self.result_type
def get_normalized_result_type(self):
if self.result_type == 'S':
return 'i'
if self.result_type == 'L':
return 'f'
return self.result_type
def get_result_size(self):
return self.result_size
def is_result_signed(self):
return self.result_flag == FLAG_SIGNED
def create_call_stub(self, rtyper, RESULT):
    """Build and attach a low-level call stub for this descr.

    The generated stub unpacks the three homogeneous argument arrays
    (args_i / args_r / args_f), performs the call through a casted
    function pointer, and converts the result back.  The stub is stored
    on one of the attributes call_stub_i / call_stub_r / call_stub_f,
    chosen by the storage category of the result (see setattr below).
    """
    from rpython.rlib.clibffi import FFI_DEFAULT_ABI
    assert self.get_call_conv() == FFI_DEFAULT_ABI, (
        "%r: create_call_stub() with a non-default call ABI" % (self,))

    def process(c):
        # Return the source-code expression that extracts the next
        # argument of class 'c' out of args_i/args_r/args_f.
        if c == 'L':
            # long longs travel in the float array when supported
            assert longlong.supports_longlong
            c = 'f'
        elif c == 'f' and longlong.supports_longlong:
            # floats are stored as long longs; convert back to a real float
            return 'longlong.getrealfloat(%s)' % (process('L'),)
        elif c == 'S':
            # single floats are stored as ints
            return 'longlong.int2singlefloat(%s)' % (process('i'),)
        arg = 'args_%s[%d]' % (c, seen[c])
        seen[c] += 1
        return arg

    def TYPE(arg):
        # Map an argument-class letter to its low-level type.
        if arg == 'i':
            return lltype.Signed
        elif arg == 'f':
            return lltype.Float
        elif arg == 'r':
            return llmemory.GCREF
        elif arg == 'v':
            return lltype.Void
        elif arg == 'L':
            return lltype.SignedLongLong
        elif arg == 'S':
            return lltype.SingleFloat
        else:
            raise AssertionError(arg)

    # Per-category counters used by process() to index into the arg arrays.
    seen = {'i': 0, 'r': 0, 'f': 0}
    args = ", ".join([process(c) for c in self.arg_classes])

    # Pick the source expression that converts the raw call result 'res'
    # into its boxed storage form, and the attribute category to store under.
    result_type = self.get_result_type()
    if result_type == history.INT:
        result = 'rffi.cast(lltype.Signed, res)'
        category = 'i'
    elif result_type == history.REF:
        assert RESULT == llmemory.GCREF  # should be ensured by the caller
        result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)'
        category = 'r'
    elif result_type == history.FLOAT:
        result = 'longlong.getfloatstorage(res)'
        category = 'f'
    elif result_type == 'L':
        result = 'rffi.cast(lltype.SignedLongLong, res)'
        category = 'f'
    elif result_type == history.VOID:
        # void calls still return a dummy 0 through the int category
        result = '0'
        category = 'i'
    elif result_type == 'S':
        result = 'longlong.singlefloat2int(res)'
        category = 'i'
    else:
        assert 0
    # Assemble and compile the stub; %(args)s / %(result)s are filled in
    # from the local variables computed above.
    source = py.code.Source("""
    def call_stub(func, args_i, args_r, args_f):
        fnptr = rffi.cast(lltype.Ptr(FUNC), func)
        res = support.maybe_on_top_of_llinterp(rtyper, fnptr)(%(args)s)
        return %(result)s
    """ % locals())
    ARGS = [TYPE(arg) for arg in self.arg_classes]
    FUNC = lltype.FuncType(ARGS, RESULT)
    d = globals().copy()
    d.update(locals())
    exec source.compile() in d
    call_stub = d['call_stub']
    # store the function into one of three attributes, to preserve
    # type-correctness of the return value
    setattr(self, 'call_stub_%s' % category, call_stub)
def verify_types(self, args_i, args_r, args_f, return_type):
    """Assert that the three argument arrays match self.arg_classes.

    'S' arguments ride in the int array and 'L' arguments in the float
    array, mirroring how the call stub unpacks them.
    """
    assert self.result_type in return_type
    count = self.arg_classes.count
    assert count('i') + count('S') == len(args_i or ())
    assert count('r') == len(args_r or ())
    assert count('f') + count('L') == len(args_f or ())
def repr_of_descr(self):
    """Return a string such as '<Calli 8 ir EF=4 OS=1>' for debugging."""
    parts = ['Call%s %d' % (self.result_type, self.result_size)]
    if self.arg_classes:
        parts.append(self.arg_classes)
    if self.extrainfo:
        parts.append('EF=%d' % self.extrainfo.extraeffect)
        oopspecindex = self.extrainfo.oopspecindex
        if oopspecindex:
            parts.append('OS=%d' % oopspecindex)
    return '<%s>' % ' '.join(parts)
def map_type_to_argclass(ARG, accept_void=False):
    """Translate a low-level type into its one-letter argument class.

    Returns 'i'/'r'/'f' for the three main categories, 'S' for single
    floats, 'L' for long longs, and 'v' for Void (only when accept_void
    is true).  Raises NotImplementedError for anything else.
    """
    kind = getkind(ARG)
    if kind == 'int':
        return 'S' if ARG is lltype.SingleFloat else 'i'
    if kind == 'ref':
        return 'r'
    if kind == 'float':
        return 'L' if is_longlong(ARG) else 'f'
    if kind == 'void' and accept_void:
        return 'v'
    raise NotImplementedError('ARG = %r' % (ARG,))
def get_call_descr(gccache, ARGS, RESULT, extrainfo=None):
    """Return a memoized CallDescr for a call with the given signature.

    Descrs are cached in gccache._cache_call, keyed by the argument
    classes, the result's class/signedness, the erased result type and
    the extrainfo, so identical signatures share a single CallDescr.
    """
    arg_classes = map(map_type_to_argclass, ARGS)
    arg_classes = ''.join(arg_classes)
    result_type = map_type_to_argclass(RESULT, accept_void=True)
    RESULT_ERASED = RESULT
    if RESULT is lltype.Void:
        result_size = 0
        result_signed = False
    else:
        if isinstance(RESULT, lltype.Ptr):
            # avoid too many CallDescrs
            if result_type == 'r':
                RESULT_ERASED = llmemory.GCREF
            else:
                RESULT_ERASED = llmemory.Address
        result_size = symbolic.get_size(RESULT_ERASED,
                                        gccache.translate_support_code)
        result_signed = get_type_flag(RESULT) == FLAG_SIGNED
    key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo)
    cache = gccache._cache_call
    try:
        calldescr = cache[key]
    except KeyError:
        # Cache miss: build the descr and its call stub once, then reuse it.
        calldescr = CallDescr(arg_classes, result_type, result_signed,
                              result_size, extrainfo)
        calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED)
        cache[key] = calldescr
    # Sanity check via repr so the comparison also works when result_size
    # is a symbolic (untranslated) size rather than a plain integer.
    assert repr(calldescr.result_size) == repr(result_size)
    return calldescr
def unpack_arraydescr(arraydescr):
    """Return (item size, base offset, item signedness) of an array descr."""
    assert isinstance(arraydescr, ArrayDescr)
    # basesize already includes the +1 extra byte for STR
    return (arraydescr.itemsize,
            arraydescr.basesize,
            arraydescr.is_item_signed())
def unpack_fielddescr(fielddescr):
    """Return (offset, field size, field signedness) of a field descr."""
    assert isinstance(fielddescr, FieldDescr)
    return (fielddescr.offset,
            fielddescr.field_size,
            fielddescr.is_field_signed())
unpack_fielddescr._always_inline_ = True
def unpack_interiorfielddescr(descr):
    """Return (total offset, item size, field size, signedness).

    The total offset is the array's base size plus the field's offset
    inside one item.
    """
    assert isinstance(descr, InteriorFieldDescr)
    arraydescr = descr.arraydescr
    fielddescr = descr.fielddescr
    total_ofs = arraydescr.basesize + fielddescr.offset
    return (total_ofs, arraydescr.itemsize,
            fielddescr.field_size, fielddescr.is_field_signed())
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
# Used as the fixed learning rate of the training op in _dnn_model_fn below.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
  """Adds zero-fraction and histogram summaries for a layer output.

  Args:
    value: `Tensor`, the output of one layer of the network.
    tag: `str` prefix (typically the layer's scope name) for summary names.
  """
  summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
  summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `_Head` instance.
      * hidden_units: List of hidden units per layer.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use the Adagrad
          optimizer with a default learning rate of 0.05.
      * activation_fn: Activation function applied to each layer. If `None`,
          will use `tf.nn.relu`.
      * dropout: When not `None`, the probability we will drop out a given
          coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * embedding_lr_multipliers: Optional. A dictionary from
          `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
          multiply with learning rate for the embedding variables.
      * input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.
  """
  head = params["head"]
  hidden_units = params["hidden_units"]
  feature_columns = params["feature_columns"]
  # `or` (not a plain default) so an explicit optimizer=None also maps to
  # "Adagrad"; the string is resolved later by optimizers.optimize_loss.
  optimizer = params.get("optimizer") or "Adagrad"
  activation_fn = params.get("activation_fn")
  dropout = params.get("dropout")
  gradient_clip_norm = params.get("gradient_clip_norm")
  # 64 << 20 == 64MB, the documented default minimum slice size.
  input_layer_min_slice_size = (
      params.get("input_layer_min_slice_size") or 64 << 20)
  # No partitioning when no RunConfig is supplied.
  num_ps_replicas = config.num_ps_replicas if config else 0
  embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})

  features = _get_feature_dict(features)
  parent_scope = "dnn"

  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas)
  with variable_scope.variable_scope(
      parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):
    # The input layer gets its own partitioner honoring the configured
    # minimum slice size (embeddings can be much larger than other layers).
    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas,
            min_slice_size=input_layer_min_slice_size))
    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=input_layer_partitioner) as input_layer_scope:
      net = layers.input_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=feature_columns,
          weight_collections=[parent_scope],
          scope=input_layer_scope)

    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
          "hiddenlayer_%d" % layer_id,
          values=(net,)) as hidden_layer_scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=activation_fn,
            variables_collections=[parent_scope],
            scope=hidden_layer_scope)
        # layers.dropout takes a keep probability, hence 1 - dropout;
        # dropout is only active during training.
        if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dropout))
      _add_hidden_layer_summary(net, hidden_layer_scope.name)

    # Final linear projection to the head's logits dimension (no activation).
    with variable_scope.variable_scope(
        "logits",
        values=(net,)) as logits_scope:
      logits = layers.fully_connected(
          net,
          head.logits_dimension,
          activation_fn=None,
          variables_collections=[parent_scope],
          scope=logits_scope)
    _add_hidden_layer_summary(logits, logits_scope.name)

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizers.optimize_loss(
          loss=loss,
          global_step=contrib_variables.get_global_step(),
          learning_rate=_LEARNING_RATE,
          optimizer=_get_optimizer(optimizer),
          gradient_multipliers=(
              dnn_linear_combined._extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                  embedding_lr_multipliers, parent_scope,
                  input_layer_scope.name)),
          clip_gradients=gradient_clip_norm,
          name=parent_scope,
          # Empty summaries to prevent optimizers from logging training_loss.
          summaries=[])

    # The head turns logits + labels into ModelFnOps (predictions, loss,
    # train_op) appropriate for the current mode.
    return head.create_model_fn_ops(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
class DNNClassifier(estimator.Estimator):
  """A classifier for TensorFlow DNN models.

  Example:

  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)

  estimator = DNNClassifier(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256])

  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNClassifier(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    pass
  estimator.fit(input_fn=input_fn_train)

  def input_fn_eval: # returns x, y (where y represents label's class index).
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None,
               input_layer_min_slice_size=None):
    """Initializes a DNNClassifier instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        It must be greater than 1. Note: Class labels are integers representing
        the class index (i.e. values from 0 to n_classes-1). For arbitrary
        label values (e.g. string labels), convert to class indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
          a `float` multiplier. Multiplier will be used to multiply with
          learning rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.

    Returns:
      A `DNNClassifier` estimator.

    Raises:
      ValueError: If `n_classes` < 2.
    """
    # Kept for export()'s default input_fn, which parses examples with the
    # same columns the model was built with.
    self._feature_columns = tuple(feature_columns or [])
    # All hyperparameters are forwarded to the shared _dnn_model_fn via
    # `params`; the multi-class head encapsulates loss and prediction ops.
    super(DNNClassifier, self).__init__(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head":
                head_lib.multi_class_head(
                    n_classes,
                    weight_column_name=weight_column_name,
                    enable_centered_bias=enable_centered_bias),
            "hidden_units": hidden_units,
            "feature_columns": self._feature_columns,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "input_layer_min_slice_size": input_layer_min_slice_size,
        },
        feature_engineering_fn=feature_engineering_fn)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  @deprecated_arg_values(
      "2017-03-01",
      "Please switch to predict_classes, or set `outputs` argument.",
      outputs=None)
  def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
              as_iterable=True):
    """Returns predictions for given features.

    By default, returns predicted classes. But this default will be dropped
    soon. Users should either pass `outputs`, or call `predict_classes` method.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns classes.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
      If `outputs` is set, returns a dict of predictions.
    """
    # Preserve the deprecated behavior: no `outputs` means "predict classes".
    if not outputs:
      return self.predict_classes(
          x=x,
          input_fn=input_fn,
          batch_size=batch_size,
          as_iterable=as_iterable)
    return super(DNNClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=outputs,
        as_iterable=as_iterable)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_classes(self, x=None, input_fn=None, batch_size=None,
                      as_iterable=True):
    """Returns predicted classes for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
    """
    # Request only the CLASSES output, then strip the dict wrapper.
    key = prediction_key.PredictionKey.CLASSES
    preds = super(DNNClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return (pred[key] for pred in preds)
    return preds[key].reshape(-1)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(self,
                    x=None,
                    input_fn=None,
                    batch_size=None,
                    as_iterable=True):
    """Returns predicted probabilities for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities with shape [batch_size, n_classes]
      (or an iterable of predicted probabilities if as_iterable is True).
    """
    # Same pattern as predict_classes, but for the PROBABILITIES output.
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = super(DNNClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return (pred[key] for pred in preds)
    return preds[key]

  @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    def default_input_fn(unused_estimator, examples):
      # Parse serialized tf.Examples using the columns this model was
      # constructed with.
      return layers.parse_feature_columns_from_examples(examples,
                                                        self._feature_columns)
    return super(DNNClassifier, self).export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or
                      export.classification_signature_fn_with_prob),
        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
class DNNRegressor(estimator.Estimator):
  """A regressor for TensorFlow DNN models.

  Example:

  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)

  estimator = DNNRegressor(
      feature_columns=[sparse_feature_a, sparse_feature_b],
      hidden_units=[1024, 512, 256])

  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNRegressor(
      feature_columns=[sparse_feature_a, sparse_feature_b],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Input builders
  def input_fn_train: # returns x, y
    pass
  estimator.fit(input_fn=input_fn_train)

  def input_fn_eval: # returns x, y
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None,
               label_dimension=1,
               embedding_lr_multipliers=None,
               input_layer_min_slice_size=None):
    """Initializes a `DNNRegressor` instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      embedding_lr_multipliers: Optional. A dictionary from `EbeddingColumn` to
          a `float` multiplier. Multiplier will be used to multiply with
          learning rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.

    Returns:
      A `DNNRegressor` estimator.
    """
    # Kept for export()'s default input_fn, which parses examples with the
    # same columns the model was built with.
    self._feature_columns = tuple(feature_columns or [])
    # Same shared _dnn_model_fn as DNNClassifier, but with a regression head.
    super(DNNRegressor, self).__init__(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head":
                head_lib.regression_head(
                    label_dimension=label_dimension,
                    weight_column_name=weight_column_name,
                    enable_centered_bias=enable_centered_bias),
            "hidden_units": hidden_units,
            "feature_columns": self._feature_columns,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "input_layer_min_slice_size": input_layer_min_slice_size,
        },
        feature_engineering_fn=feature_engineering_fn)

  def evaluate(self,
               x=None,
               y=None,
               input_fn=None,
               feed_fn=None,
               batch_size=None,
               steps=None,
               metrics=None,
               name=None,
               checkpoint_path=None,
               hooks=None):
    """See evaluable.Evaluable."""
    # TODO(zakaria): remove once deprecation is finished (b/31229024)
    # Rewrite legacy metric keys (plain strings) to the (name, prediction_key)
    # tuple form expected by the base class.
    custom_metrics = {}
    if metrics:
      for key, metric in six.iteritems(metrics):
        if (not isinstance(metric, metric_spec.MetricSpec) and
            not isinstance(key, tuple)):
          custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric
        else:
          custom_metrics[key] = metric
    return super(DNNRegressor, self).evaluate(
        x=x,
        y=y,
        input_fn=input_fn,
        feed_fn=feed_fn,
        batch_size=batch_size,
        steps=steps,
        metrics=custom_metrics,
        name=name,
        checkpoint_path=checkpoint_path,
        hooks=hooks)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  @deprecated_arg_values(
      "2017-03-01",
      "Please switch to predict_scores, or set `outputs` argument.",
      outputs=None)
  def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
              as_iterable=True):
    """Returns predictions for given features.

    By default, returns predicted scores. But this default will be dropped
    soon. Users should either pass `outputs`, or call `predict_scores` method.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns scores.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted scores (or an iterable of predicted scores if
      as_iterable is True). If `label_dimension == 1`, the shape of the output
      is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
      If `outputs` is set, returns a dict of predictions.
    """
    # Preserve the deprecated behavior: no `outputs` means "predict scores".
    if not outputs:
      return self.predict_scores(
          x=x,
          input_fn=input_fn,
          batch_size=batch_size,
          as_iterable=as_iterable)
    return super(DNNRegressor, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=outputs,
        as_iterable=as_iterable)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_scores(self, x=None, input_fn=None, batch_size=None,
                     as_iterable=True):
    """Returns predicted scores for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted scores (or an iterable of predicted scores if
      as_iterable is True). If `label_dimension == 1`, the shape of the output
      is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
    """
    # Request only the SCORES output, then strip the dict wrapper.
    key = prediction_key.PredictionKey.SCORES
    preds = super(DNNRegressor, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return (pred[key] for pred in preds)
    return preds[key]

  @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    def default_input_fn(unused_estimator, examples):
      # Parse serialized tf.Examples using the columns this model was
      # constructed with.
      return layers.parse_feature_columns_from_examples(examples,
                                                        self._feature_columns)
    return super(DNNRegressor, self).export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=signature_fn or export.regression_signature_fn,
        prediction_key=prediction_key.PredictionKey.SCORES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
class DNNEstimator(estimator.Estimator):
  """A Estimator for TensorFlow DNN models with user specified _Head.

  Example:

  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)
  To create a DNNEstimator for binary classification, where
  estimator = DNNEstimator(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      head=tf.contrib.learn.multi_class_head(n_classes=2),
      hidden_units=[1024, 512, 256])

  If your label is keyed with "y" in your labels dict, and weights are keyed
  with "w" in features dict, and you want to enable centered bias,
  head = tf.contrib.learn.multi_class_head(
      n_classes=2,
      label_name="y",
      weight_column_name="w",
      enable_centered_bias=True)
  estimator = DNNEstimator(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      head=head,
      hidden_units=[1024, 512, 256])

  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    pass
  estimator.fit(input_fn=input_fn_train)

  def input_fn_eval: # returns x, y (where y represents label's class index).
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               head,
               hidden_units,
               feature_columns,
               model_dir=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None,
               input_layer_min_slice_size=None):
    """Initializes a `DNNEstimator` instance.

    Args:
      head: `Head` instance.
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
          a `float` multiplier. Multiplier will be used to multiply with
          learning rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.

    Returns:
      A `DNNEstimator` estimator.
    """
    # Unlike DNNClassifier/DNNRegressor, the caller supplies the head,
    # so any head type (multi-class, regression, multi-head, ...) works.
    super(DNNEstimator, self).__init__(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head": head,
            "hidden_units": hidden_units,
            "feature_columns": feature_columns,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "input_layer_min_slice_size": input_layer_min_slice_size,
        },
        feature_engineering_fn=feature_engineering_fn)
| |
__author__ = 'Jossef Harush'
def get_ftp_banner_info(banner):
    """Fingerprint an FTP banner against the known-server hint table.

    Args:
        banner: Raw FTP banner string.

    Returns:
        Tuple `(server, operating_system)` taken from the first matching
        entry of `ftp_servers`; `(None, None)` when no hint is found.
    """
    # Lower the banner's case in order to get a case-insensitive match.
    banner = banner.lower()
    # Single pass over the hint table. The original scanned the dict twice
    # (once via any(), once via a rebuilt generator) and relied on the
    # Python-2-only generator .next() method / dict.iteritems().
    for hint, operating_system in ftp_servers.items():
        if hint in banner:
            return hint, operating_system
    return None, None
def get_smtp_banner_info(banner):
    """Fingerprint an SMTP banner against the known-server hint table.

    Args:
        banner: Raw SMTP banner string.

    Returns:
        Tuple `(server, operating_system)` taken from the first matching
        entry of `smtp_servers`; `(None, None)` when no hint is found.
    """
    # Lower the banner's case in order to get a case-insensitive match.
    banner = banner.lower()
    # Single pass over the hint table. The original scanned the dict twice
    # (once via any(), once via a rebuilt generator) and relied on the
    # Python-2-only generator .next() method / dict.iteritems().
    for hint, operating_system in smtp_servers.items():
        if hint in banner:
            return hint, operating_system
    return None, None
def get_http_banner_info(banner):
    """Fingerprint an HTTP Server banner and guess the host OS.

    Args:
        banner: Raw HTTP ``Server`` header value.

    Returns:
        Tuple `(server, operating_system)`. `server` is the product name
        from `known_banner_web_servers` on an exact (lowercased) match,
        otherwise the raw lowercased banner; `operating_system` is a guess
        derived from OS hint substrings, or `None`.
    """
    # Lower the banner's case in order to get a case-insensitive match.
    banner = banner.lower()
    server = known_banner_web_servers.get(banner, None)
    operating_system = None
    # If we successfully matched a server
    if server:
        if any(item in banner for item in windows_hints):
            operating_system = 'windows'
        elif any(item in banner for item in linux_hints):
            # BUG FIX: the original iterated `(item in banner for item in
            # linux_hints)` -- a generator of booleans -- so `distribution`
            # was always `True`, yielding the string 'linux (True)'.
            # Select the matching hint itself instead.
            distribution = next(item for item in linux_hints if item in banner)
            operating_system = 'linux ({0})'.format(distribution)
        elif any(item in banner for item in mac_os_hints):
            operating_system = 'mac os'
    # Otherwise, let's try to guess using hints
    else:
        if any(item in banner for item in hosting_hints):
            operating_system = 'filtered (hosting protection)'
        server = banner
    return server, operating_system
# -------------------------------------------------------------------
# Static hard-coded data below (in real life should be more dynamic..)
# -- -- -- -- -- -- -- --
# Most info has been scraped from http://www.computec.ch/projekte/httprecon/?s=database&t=head_existing&f=banner
known_banner_web_servers = {
'0w/0.8c': '0w 0.8c',
'webstar/2.0 id/33333': '4d webstar 2.0',
'webstar/2.1.1 id/33333': '4d webstar 2.1.1',
'webstar/3.0.2 id/878810': '4d webstar 3.0.2',
'webstar/4.2(ssl) id/79106': '4d webstar 4.2',
'webstar/4.5(ssl) id/878810': '4d webstar 4.5',
'4d_webstar_s/5.3.1 (macos x)': '4d webstar 5.3.1',
'4d_webstar_s/5.3.3 (macos x)': '4d webstar 5.3.3',
'4d_webstar_s/5.4.0 (macos x)': '4d webstar 5.4.0',
'aidex/1.1 (win32)': 'aidex mini-webserver 1.1',
'naviserver/2.0 aolserver/2.3.3': 'aolserver 2.3.3',
'aolserver/3.3.1+ad13': 'aolserver 3.3.1',
'aolserver 3.4.2': 'aolserver 3.4.2',
'aolserver/3.4.2 sp/1': 'aolserver 3.4.2',
'aolserver/3.5.10': 'aolserver 3.4.2',
'aolserver/3.5.0': 'aolserver 3.5.0',
'aolserver/4.0.10': 'aolserver 4.0.10',
'aolserver/4.0.10a': 'aolserver 4.0.10a',
'aolserver/4.0.11a': 'aolserver 4.0.11a',
'aolserver/4.5.0': 'aolserver 4.5.0',
'abyss/2.0.0.20-x2-win32 abysslib/2.0.0.20': 'abyss 2.0.0.20 x2',
'abyss/2.4.0.3-x2-win32 abysslib/2.4.0.3': 'abyss 2.4.0.3 x2',
'abyss/2.5.0.0-x1-win32 abysslib/2.5.0.0': 'abyss 2.5.0.0 x1',
'abyss/2.5.0.0-x2-linux abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.5.0.0-x2-macos x abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.5.0.0-x2-win32 abysslib/2.5.0.0': 'abyss 2.5.0.0 x2',
'abyss/2.6.0.0-x2-linux abysslib/2.6.0.0': 'abyss 2.6.0.0 x2',
'allegroserve/1.2.50': 'allegroserve 1.2.50',
'anti-web v3.0.7 (fear and loathing on the www)': 'anti-web httpd 3.0.7',
'antiweb/4.0beta13': 'anti-web httpd 4.0beta13',
'apache/1.2.6': 'apache 1.2.6',
'apache/1.3.12 (unix) php/3.0.14': 'apache 1.3.12',
'apache/1.3.17 (win32)': 'apache 1.3.17',
'apache/1.3.26 (linux/suse) mod_ssl/2.8.10 openssl/0.9.6g php/4.2.2': 'apache 1.3.26',
'apache/1.3.26 (unitedlinux) mod_python/2.7.8 python/2.2.1 php/4.2.2': 'apache 1.3.26',
'apache/1.3.26 (unix)': 'apache 1.3.26',
'apache/1.3.26 (unix) debian gnu/linux php/4.1.2': 'apache 1.3.26',
'apache/1.3.26 (unix) debian gnu/linux mod_ssl/2.8.9 openssl/0.9.6g': 'apache 1.3.26',
'mit web server apache/1.3.26 mark/1.5 (unix) mod_ssl/2.8.9': 'apache 1.3.26',
'apache/1.3.27 (linux/suse) mod_ssl/2.8.12 openssl/0.9.6i php/4.3.1': 'apache 1.3.27',
'apache/1.3.27 (turbolinux) mod_throttle/3.1.2 mod_ruby/0.9.7 ruby/1.6.4': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux)': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux) mod_python/2.7.8 python/1.5.2': 'apache 1.3.27',
'apache/1.3.27 (unix) (red-hat/linux) mod_ssl/2.8.12 openssl/0.9.6b': 'apache 1.3.27',
'apache/1.3.27 (unix) php/4.3.1': 'apache 1.3.27',
'apache/1.3.27 (unix) mod_perl/1.27': 'apache 1.3.27',
'apache/1.3.27 (win32)': 'apache 1.3.27',
'apache/1.3.28 (unix) mod_perl/1.27 php/4.3.3': 'apache 1.3.28',
'apache/1.3.29 (debian gnu/linux) mod_perl/1.29': 'apache 1.3.29',
'apache/1.3.29 (unix)': 'apache 1.3.29',
'apache/1.3.31 (unix)': 'apache 1.3.31',
'anu_webapp': 'apache 1.3.33',
'apache/1.3.33 (darwin) php/5.2.1': 'apache 1.3.33',
'apache/1.3.33 (darwin) mod_ssl/2.8.24 openssl/0.9.7l mod_jk/1.2.25': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) php/4.3.10-20 mod_perl/1.29': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) php/4.3.8-9 mod_ssl/2.8.22': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) mod_gzip/1.3.26.1a php/4.3.10-22': 'apache 1.3.33',
'apache/1.3.33 (debian gnu/linux) mod_python/2.7.10 python/2.3.4': 'apache 1.3.33',
'apache/1.3.33 (openpkg/2.4) mod_gzip/1.3.26.1a php/4.3.11 mod_watch/3.17': 'apache 1.3.33',
'apache/1.3.33 (unix) php/4.3.10 frontpage/5.0.2.2510': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_auth_passthrough/1.8 mod_bwlimited/1.4': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_fastcgi/2.4.2 mod_gzip/1.3.26.1a mod_ssl/2.8.22': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_perl/1.29': 'apache 1.3.33',
'apache/1.3.33 (unix) mod_ssl/2.8.22 openssl/0.9.7d php/4.3.10': 'apache 1.3.33',
'apache/1.3.34': 'apache 1.3.34',
'apache/1.3.34 (debian) authmysql/4.3.9-2 mod_ssl/2.8.25 openssl/0.9.8c': 'apache 1.3.34',
'apache/1.3.34 (debian) php/4.4.4-8+etch4': 'apache 1.3.34',
'apache/1.3.34 (debian) php/5.2.0-8+etch7 mod_ssl/2.8.25 openssl/0.9.8c': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_fastcgi/2.4.2': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_perl/1.30': 'apache 1.3.34',
'apache/1.3.34 (unix) (gentoo) mod_ssl/2.8.25 openssl/0.9.7e': 'apache 1.3.34',
'apache/1.3.34 (unix) php/4.4.2 mod_perl/1.29 dav/1.0.3 mod_ssl/2.8.25': 'apache 1.3.34',
'apache/1.3.34 (unix) mod_jk/1.2.15 mod_perl/1.29 mod_gzip/1.3.26.1a': 'apache 1.3.34',
'apache/1.3.35 (unix)': 'apache 1.3.35',
'apache/1.3.27 (unix) (red-hat/linux) mod_perl/1.26 php/4.3.3': 'apache 1.3.37',
'apache/1.3.37 (unix) frontpage/5.0.2.2635 mod_ssl/2.8.28 openssl/0.9.7m': 'apache 1.3.37',
'apache/1.3.37 (unix) php/4.3.11': 'apache 1.3.37',
'apache/1.3.37 (unix) php/4.4.7 mod_throttle/3.1.2 frontpage/5.0.2.2635': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.1.2': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.0': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.1': 'apache 1.3.37',
'apache/1.3.37 (unix) php/5.2.3 mod_auth_passthrough/1.8': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_auth_passthrough/1.8 mod_log_bytes/1.2': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_perl/1.29': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_perl/1.30 mod_ssl/2.8.28 openssl/0.9.7e-p1': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_ssl/2.8.28 openssl/0.9.7d': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_ssl/2.8.28 openssl/0.9.8d': 'apache 1.3.37',
'apache/1.3.37 (unix) mod_throttle/3.1.2 dav/1.0.3 mod_fastcgi/2.4.2': 'apache 1.3.37',
'apache/1.3.37 ben-ssl/1.57 (unix) mod_gzip/1.3.26.1a mod_fastcgi/2.4.2': 'apache 1.3.37',
'apache/1.3.37.fb1': 'apache 1.3.37',
'apache/1.3.39 (unix) dav/1.0.3 mod_auth_passthrough/1.8': 'apache 1.3.39',
'apache/1.3.39 (unix) php/4.4.7': 'apache 1.3.39',
'apache/1.3.39 (unix) php/5.2.3 mod_bwlimited/1.4': 'apache 1.3.39',
'apache/1.3.39 (unix) php/5.2.5 dav/1.0.3 mod_ssl/2.8.30 openssl/0.9.7c': 'apache 1.3.39',
'apache/1.3.39 (unix) mod_auth_passthrough/1.8 mod_log_bytes/1.2': 'apache 1.3.39',
'apache/1.3.39 (unix) mod_fastcgi/2.4.2 mod_auth_passthrough/1.8': 'apache 1.3.39',
'apache/1.3.39 ben-ssl/1.57 (unix) mod_perl/1.30 frontpage/5.0.2.2624': 'apache 1.3.39',
'apache/1.3.41 (unix) php/5.2.8': 'apache 1.3.41',
'apache/2.0.45 (unix) mod_jk2/2.0.3-dev': 'apache 2.0.45',
'apache/2.0.45 (unix) mod_perl/1.99_09-dev perl/v5.6.1 covalent_auth/2.3': 'apache 2.0.45',
'apache/2.0.46 (centos)': 'apache 2.0.46',
'apache/2.0.46 (red hat)': 'apache 2.0.46',
'apache/2.0.46 (white box)': 'apache 2.0.46',
'apache/2.0.48 (redhat 9/server4you)': 'apache 2.0.48',
'apache/2.0.49 (linux/suse)': 'apache 2.0.49',
'apache/2.0.49 (unix) php/4.3.9': 'apache 2.0.49',
'apache/2.0.50 (linux/suse)': 'apache 2.0.50',
'apache/2.0.50 (ydl)': 'apache 2.0.50',
'apache/2.0.51 (fedora)': 'apache 2.0.51',
'apache/2.0.52 (centos)': 'apache 2.0.52',
'apache/2.0.52 (fedora)': 'apache 2.0.52',
'apache/2.0.52 (red hat)': 'apache 2.0.52',
'apache/2.0.52 (unix)': 'apache 2.0.52',
'apache/2.0.52 (unix) dav/2 php/4.4.1': 'apache 2.0.52',
'apache/2.0.52 (win32)': 'apache 2.0.52',
'apache/2.0.52 (win32) mod_ssl/2.0.52 openssl/0.9.7e mod_auth_sspi/1.0.1': 'apache 2.0.52',
'apache/2.0.53 (linux/suse)': 'apache 2.0.53',
'apache/2.0.54 (debian gnu/linux) dav/2 svn/1.1.4': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/4.3.10-18': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/4.3.10-22 mod_ssl/2.0.54': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) php/5.1.2': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) mod_jk/1.2.14 php/5.2.4-0.dotdeb.0 with': 'apache 2.0.54',
'apache/2.0.54 (debian gnu/linux) mod_ssl/2.0.54 openssl/0.9.7e php/4.4.6': 'apache 2.0.54',
'apache/2.0.54 (fedora)': 'apache 2.0.54',
'apache/2.0.54 (linux/suse)': 'apache 2.0.54',
'apache/2.0.54 (netware) mod_jk/1.2.14': 'apache 2.0.54',
'apache/2.0.54 (unix) php/4.4.7 mod_ssl/2.0.54 openssl/0.9.7e': 'apache 2.0.54',
'apache/2.0.55': 'apache 2.0.55',
'apache/2.0.55 (freebsd) php/5.2.3 with suhosin-patch': 'apache 2.0.55',
'apache/2.0.55 (ubuntu) dav/2 php/4.4.2-1.1 mod_ssl/2.0.55 openssl/0.9.8b': 'apache 2.0.55',
'apache/2.0.55 (ubuntu) php/5.1.2': 'apache 2.0.55',
'apache/2.0.55 (unix) dav/2 mod_ssl/2.0.55 openssl/0.9.8a php/4.4.4': 'apache 2.0.55',
'apache/2.0.55 (unix) mod_ssl/2.0.55 openssl/0.9.7i mod_jk/1.2.15': 'apache 2.0.55',
'apache/2.0.55 (unix) mod_ssl/2.0.55 openssl/0.9.8a jrun/4.0': 'apache 2.0.55',
'apache/2.0.58 (unix)': 'apache 2.0.58',
'apache/2.0.58 (win32) php/5.1.4': 'apache 2.0.58',
'apache/2.0.58 (win32) php/5.1.5': 'apache 2.0.58',
'apache/2.0.59 (freebsd) dav/2 php/5.2.1 with suhosin-patch': 'apache 2.0.59',
'apache/2.0.59 (freebsd) mod_fastcgi/2.4.2 php/4.4.4 with suhosin-patch': 'apache 2.0.59',
'apache/2.0.59 (netware) mod_jk/1.2.15': 'apache 2.0.59',
'apache/2.0.59 (unix) mod_ssl/2.0.59 openssl/0.9.7e mod_jk/1.2.15': 'apache 2.0.59',
'apache/2.0.59 (unix) mod_ssl/2.0.59 openssl/0.9.8d mod_fastcgi/2.4.2': 'apache 2.0.59',
'apache/2.0.63 (red hat)': 'apache 2.0.63',
'apache/2.2.0 (freebsd) mod_ssl/2.2.0 openssl/0.9.7e-p1 dav/2 php/5.1.2': 'apache 2.2.0',
'apache/2.2.11 (freebsd)': 'apache 2.2.11',
'apache/2.2.2 (fedora)': 'apache 2.2.2',
'apache/2.2.3 (centos)': 'apache 2.2.3',
'apache/2.2.3 (debian) dav/2 svn/1.4.2 mod_python/3.2.10 python/2.4.4': 'apache 2.2.3',
'apache/2.2.3 (debian) php/4.4.4-8+etch4': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7 mod_ssl/2.2.3 openssl/0.9.8c': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch7 mod_ssl/2.2.3 openssl/0.9.8e': 'apache 2.2.3',
'apache/2.2.3 (debian) php/5.2.0-8+etch9': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_fastcgi/2.4.2 php/5.2.0-8+etch7 mod_ssl/2.2.3': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_jk/1.2.18 php/5.2.0-8+etch5~pu1 mod_ssl/2.2.3': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_jk/1.2.18 php/5.2.0-8+etch7': 'apache 2.2.3',
'apache/2.2.3 (debian) mod_ssl/2.2.3 openssl/0.9.8c php/5.2.4': 'apache 2.2.3',
'apache/2.2.3 (linux/suse)': 'apache 2.2.3',
'apache/2.2.3 (mandriva linux/prefork-1mdv2007.0)': 'apache 2.2.3',
'apache/2.2.3 (red hat)': 'apache 2.2.3',
'apache/2.2.3 (unix) php/5.2.1': 'apache 2.2.3',
'apache/2.2.4 (debian) php/4.4.4-9+lenny1 mod_ssl/2.2.4 openssl/0.9.8e': 'apache 2.2.4',
'apache/2.2.4 (fedora)': 'apache 2.2.4',
'apache/2.2.4 (fedora) mod_ssl/2.2.4 openssl/0.9.8b dav/2': 'apache 2.2.4',
'apache/2.2.4 (freebsd)': 'apache 2.2.4',
'apache/2.2.4 (unix) dav/2 php/5.2.1rc3-dev mod_ruby/1.2.5': 'apache 2.2.4',
'apache/2.2.4 (unix) mod_ssl/2.2.4 openssl/0.9.7e dav/2 svn/1.4.2': 'apache 2.2.4',
'apache/2.2.4 (win32)': 'apache 2.2.4',
'apache/2.2.6 (debian) dav/2 php/4.4.4-9 mod_ssl/2.2.6 openssl/0.9.8g': 'apache 2.2.6',
'apache/2.2.6 (debian) dav/2 svn/1.4.4 mod_python/3.3.1 python/2.4.4': 'apache 2.2.6',
'apache/2.2.6 (debian) php/5.2.4-2 with suhosin-patch mod_ssl/2.2.6': 'apache 2.2.6',
'apache/2.2.6 (freebsd) mod_ssl/2.2.6 openssl/0.9.8e dav/2': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.7a dav/2 mod_mono/1.2.4': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.7a mod_jk/1.2.25': 'apache 2.2.6',
'apache/2.2.6 (unix) mod_ssl/2.2.6 openssl/0.9.8b dav/2 php/5.2.5 with': 'apache 2.2.6',
'apache': 'apache 2.2.8',
'apache/2.2.8 (freebsd) mod_ssl/2.2.8 openssl/0.9.8g dav/2 php/5.2.5': 'apache 2.2.8',
'apache/2.2.8 (unix) mod_ssl/2.2.8 openssl/0.9.8g': 'apache 2.2.8',
'apache/2.2.8 (unix)': 'apache 2.2.9',
'apache/2.3.0-dev (unix)': 'apache 2.3.0',
'araneida/0.84': 'araneida 0.84',
'\'s webserver': 'ashleys webserver',
'badblue/2.4': 'badblue 2.4',
'badblue/2.5': 'badblue 2.5',
'badblue/2.6': 'badblue 2.6',
'badblue/2.7': 'badblue 2.7',
'barracudaserver.com (posix)': 'barracudadrive 3.9.1',
'basehttp/0.3 python/2.4.4': 'basehttpserver 0.3',
'boa/0.92o': 'boa 0.92o',
'boa/0.93.15': 'boa 0.93.15',
'boa/0.94.14rc21': 'boa 0.94.14rc21',
'cl-http/70.216 (lispworks': 'cl-http 70.216',
'caudium/1.4.9 stable': 'caudium 1.4.9',
'cherokee': 'cherokee 0.6.0',
'cherokee/0.99': 'cherokee 0.99',
'virata-emweb/r6_0_1': 'cisco vpn 3000 concentrator virata emweb r6.2.0',
'virata-emweb/r6_2_0': 'cisco vpn 3000 concentrator virata emweb r6.2.0',
'compaqhttpserver/5.2': 'compaq http server 5.2',
'compaqhttpserver/5.7': 'compaq http server 5.7',
'compaqhttpserver/5.91': 'compaq http server 5.91',
'compaqhttpserver/5.94': 'compaq http server 5.94',
'compaqhttpserver/9.9 hp system management homepage/2.1.7.168': 'compaq http server 9.9',
'cougar/9.5.6001.6264': 'cougar 9.5.6001.6264',
'goahead-webs': 'flexwatch fw-3440-b',
'gatling/0.10': 'gatling 0.10',
'gatling/0.9': 'gatling 0.9',
'globalscape-secure server/3.3': 'globalscape secure server 3.3',
'gws': 'google web server 2.1',
'mfe': 'google web server 2.1',
'sffe': 'google web server 2.1',
'httpi/1.5.2 (demonic/aix)': 'httpi 1.5.2',
'httpi/1.6.1 (demonic/aix)': 'httpi 1.6.1',
'hiawatha v6.11': 'hiawatha 6.11',
'hiawatha/6.2 mod_gwbasic/1.7.3 openxor/0.3.1a': 'hiawatha 6.2',
'ibm_http_server/2.0.47.1 apache/2.0.47 (unix)': 'ibm http server 2.0.47.1',
'ibm_http_server/6.0.2.19 apache/2.0.47 (unix)': 'ibm http server 6.0.2.19',
'ibm_http_server/6.0.2.19 apache/2.0.47 (unix) dav/2': 'ibm http server 6.0.2.19',
'ibm_http_server': 'ibm http server 6.1.0.19',
'ipc@chip': 'ipc@chip 1.04',
'icewarp/8.3': 'icewarp 8.3.0',
'indy/9.00.10': 'indy idhttpserver 9.00.10',
'jana-server/2.4.8.51': 'jana-server 2.4.8.51',
'jetty/5.1.10 (linux/2.6.12 i386 java/1.5.0_05': 'jetty 5.1.10',
'jetty/5.1.1 (linux/2.6.9-5.elsmp i386 java/1.5.0_09': 'jetty 5.1.1',
'jetty(6.1.1)': 'jetty 6.1.1',
'jigsaw/2.2.5': 'jigsaw 2.2.5',
'jigsaw/2.2.6': 'jigsaw 2.2.6',
'jigsaw/2.3.0-beta1': 'jigsaw 2.3.0-beta1',
'kget': 'kget web interface 2.1.3',
'klone/2.1.0rc1': 'klone 2.1.0rc1',
'allegro-software-rompager/2.00': 'konica ip-421/7020 allegro rompager 2.00',
'boa/0.94.13': 'linksys wvc54gc boa 0.94.13',
'listmanagerweb/8.8c (based on tcl-webserver/3.4.2)': 'listmanagerweb 8.8c',
'litespeed': 'litespeed web server 3.3',
'domino-go-webserver/4.6.2.5': 'lotus domino go webserver 4.6.2.5',
'mathopd/1.5p6': 'mathopd 1.5p6',
'microsoft-iis/5.0': 'microsoft iis 5.0',
'microsoft-iis/5.1': 'microsoft iis 5.1',
'microsoft-iis/6.0': 'microsoft iis 6.0',
'microsoft-iis/6.0.0': 'microsoft iis 6.0',
'microsoft-iis/7.0': 'microsoft iis 7.0',
'mongrel 1.0': 'mongrel 1.0',
'aegis_nanoweb/2.2.10-dev (linux': 'nanoweb 2.2.10',
'rapid logic/1.1': 'net2phone rapid logic 1.1',
'thttpd/2.25b 29dec2003': 'netbotz 500 thttpd 2.25b',
'netware-enterprise-web-server/5.1': 'netware enterprise web server 5.1',
'zyxel-rompager/3.02': 'netgear rp114 3.26',
'allegro-software-rompager/2.10': 'netopia router allegro rompager 2.10',
'netscape-enterprise/2.01': 'netscape enterprise server 2.01',
'netscape-enterprise/3.5.1': 'netscape enterprise server 3.5.1',
'netscape-enterprise/3.5.1g': 'netscape enterprise server 3.5.1g',
'netscape-enterprise/4.1': 'netscape enterprise server 4.1',
'netscape-enterprise/6.0': 'netscape enterprise server 6.0',
'netscape-fasttrack/3.02': 'netscape fasttrack 3.02a',
'osu/3.12alpha': 'osu 3.12alpha',
'osu/3.9': 'osu 3.9',
'omnihttpd/2.06': 'omnihttpd 2.06',
'omnihttpd/2.09': 'omnihttpd 2.09',
'omnihttpd/2.10': 'omnihttpd 2.10',
'opensa/1.0.1 / apache/1.3.23 (win32) php/4.1.1 dav/1.0.2': 'opensa 1.0.1',
'opensa/1.0.3 / apache/1.3.26 (win32) mod_ssl/2.8.9 openssl/0.9.6g': 'opensa 1.0.3',
'opensa/1.0.4 / apache/1.3.27 (win32) php/4.2.2 mod_gzip/1.3.19.1a': 'opensa 1.0.4',
'opensa/1.0.5 / apache/1.3.27 (win32) (using ihtml/2.20.500)': 'opensa 1.0.5',
'oracle-application-server-10g oracleas-web-cache-10g/10.1.2.0.0 (n': 'oracle application server 10g 10.1.2.0.0',
'oracle-application-server-10g/10.1.2.0.0 oracle-http-server': 'oracle application server 10g 10.1.2.0.0',
'oracle-application-server-10g/10.1.2.0.2 oracle-http-server': 'oracle application server 10g 10.1.2.0.2',
'oracle-application-server-10g oracleas-web-cache-10g/10.1.2.2.0 (tn': 'oracle application server 10g 10.1.2.2.0',
'oracle-application-server-10g/10.1.2.2.0 oracle-http-server': 'oracle application server 10g 10.1.2.2.0',
'oracle-application-server-10g/10.1.3.0.0 oracle-http-server': 'oracle application server 10g 10.1.3.0.0',
'oracle-application-server-10g/10.1.3.1.0 oracle-http-server': 'oracle application server 10g 10.1.3.1.0',
'oracle-application-server-10g/9.0.4.0.0 oracle-http-server': 'oracle application server 10g 9.0.4.0.0',
'oracle-application-server-10g/9.0.4.1.0 oracle-http-server': 'oracle application server 10g 9.0.4.1.0',
'oracle-application-server-10g/9.0.4.2.0 oracle-http-server': 'oracle application server 10g 9.0.4.2.0',
'oracle-application-server-10g/9.0.4.3.0 oracle-http-server': 'oracle application server 10g 9.0.4.3.0',
'oracle9ias/9.0.2.3.0 oracle http server': 'oracle application server 9i 9.0.2.3.0',
'oracle9ias/9.0.2 oracle http server': 'oracle application server 9i 9.0.2',
'oracle9ias/9.0.3.1 oracle http server': 'oracle application server 9i 9.0.3.1',
'orion/2.0.7': 'orion 2.0.7',
'oversee webserver v1.3.18': 'oversee webserver 1.3.18',
'httpd/1.00': 'packetshaper httpd 1.00',
'wg_httpd/1.0(based boa/0.92q)': 'philips netcam 1.4.8 wg_httpd 1.0',
'thttpd/2.20b 10oct00': 'qnap nas-4100 2.26.0517',
'http server 1.0': 'qnap ts-411u 1.2.0.0531',
'resin/3.0.23': 'resin 3.0.23',
'resin/3.0.6': 'resin 3.0.6',
'web-server/3.0': 'ricoh aficio 6002 3.53.3 web-server 3.0',
'roxen/2.2.213': 'roxen 2.2.213',
'roxen/4.5.111-release2': 'roxen 4.5.111',
'roxen/4.5.145-rc2': 'roxen 4.5.145',
'snap appliances, inc./3.1.603': 'snap appliance 3.1.603',
'snap appliance, inc./3.4.803': 'snap appliance 3.4.803',
'snap appliance, inc./3.4.805': 'snap appliance 3.4.805',
'snap appliance, inc./4.0.830': 'snap appliance 4.0.830',
'snap appliance, inc./4.0.854': 'snap appliance 4.0.854',
'snap appliance, inc./4.0.860': 'snap appliance 4.0.860',
'snapstream': 'snapstream digital video recorder',
'netevi/1.09': 'sony snc-rz30 netevi 1.09',
'netevi/2.05': 'sony snc-rz30 netevi 2.05',
'netevi/2.05g': 'sony snc-rz30 netevi 2.05g',
'netevi/2.06': 'sony snc-rz30 netevi 2.06',
'netevi/2.13': 'sony snc-rz30 netevi 2.13',
'netevi/2.14': 'sony snc-rz30 netevi 2.14',
'netevi/2.24': 'sony snc-rz30 netevi 2.24',
'netevi/3.01': 'sony snc-rz30 netevi 3.01',
'netevi/3.02': 'sony snc-rz30 netevi 3.02',
'netevi/3.03': 'sony snc-rz30 netevi 3.03',
'netevi/3.10': 'sony snc-rz30 netevi 3.10',
'netevi/3.10a': 'sony snc-rz30 netevi 3.10a',
'netevi/3.14': 'sony snc-rz30 netevi 3.14',
'netzoom/1.00': 'sony snc-z20 netzoom 1.00',
'squid/2.5.stable5': 'squid 2.5.stable5',
'squid/2.5.stable6': 'squid 2.5.stable6',
'squid/2.5.stable9': 'squid 2.5.stable9',
'squid/2.6.stable13': 'squid 2.6.stable13',
'squid/2.6.stable4': 'squid 2.6.stable4',
'squid/2.6.stable7': 'squid 2.6.stable7',
'stweb/1.3.27 (unix) authmysql/3.1 mod_jk/1.1.0 php/3.0.18 php/4.2.3 with': 'stweb 1.3.27',
'sun-java-system-web-server/6.1': 'sun java system web server 6.1',
'sun-java-system-web-server/7.0': 'sun java system web server 7.0',
'sun-one-web-server/6.1': 'sun one web server 6.1',
'smssmtphttp': 'symantec mail security for smtp',
'tcl-webserver/3.5.1 may 27, 2004': 'tclhttpd 3.5.1',
'theserver/2.21l': 'theserver 2.21l',
'userland frontier/9.0.1-winnt': 'userland frontier 9.0.1',
'userland frontier/9.5-winnt': 'userland frontier 9.5',
'realvnc/4.0': 'vnc server enterprise edition e4.2.5',
'vswebserver/01.00 index/01.02.01': 'vs web server 01.00.00',
'virtuoso/05.00.3021 (linux) i686-generic-linux-glibc23-32 vdb': 'virtuoso 5.0.3',
'wdaemon/9.6.1': 'wdaemon 9.6.1',
'webrick/1.3.1 (ruby/1.9.0/2006-07-13)': 'webrick 1.3.1',
'wn/2.4.7': 'wn server 2.4.7',
'allegro-software-rompager/3.06b1': 'xerox docuprint n4025 allegro rompager 3.06b1',
'spyglass_microserver/2.01fc1': 'xerox phaser 6200',
'yaws/1.65 yet another web server': 'yaws 1.65',
'yaws/1.68 yet another web server': 'yaws 1.68',
'yaws/1.72 yet another web server': 'yaws 1.72',
'yaws/sys_6.0.5 yet another web server': 'yaws 6.0.5',
'zeus/4.3': 'zeus 4.3',
'zeus/4.41': 'zeus 4.41',
'unknown/0.0 upnp/1.0 conexant-emweb/r6_1_0': 'zoom adsl',
'zope/(zope 2.10.4-final, python 2.4.4, linux2) zserver/1.1 plone/3.0.1': 'zope 2.10.4',
'zope/(zope 2.5.0 (binary release, python 2.1, linux2-x86), python 2.1.2,': 'zope 2.5.0',
'zope/(zope 2.5.1 (source release, python 2.1, linux2), python 2.1.3,': 'zope 2.5.1',
'zope/(zope 2.6.0 (binary release, python 2.1, linux2-x86), python 2.1.3,': 'zope 2.6.0',
'zope/(zope 2.6.1 (source release, python 2.1, linux2), python 2.2.3,': 'zope 2.6.1',
'zope/(zope 2.6.4 (source release, python 2.1, linux2), python 2.2.3,': 'zope 2.6.4',
'zope/(zope 2.7.4-0, python 2.3.5, linux2) zserver/1.1': 'zope 2.7.4',
'squid/2.5.stable12': 'zope 2.7.4',
'zope/(zope 2.7.5-final, python 2.3.4, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.5',
'zope/(zope 2.7.5-final, python 2.3.5, linux2) zserver/1.1': 'zope 2.7.5',
'zope/(zope 2.7.6-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.6',
'zope/(zope 2.7.6-final, python 2.4.0, linux2) zserver/1.1': 'zope 2.7.6',
'zope/(zope 2.7.7-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.7',
'zope/(zope 2.7.7-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.7.7',
'zope/(zope 2.7.8-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.5': 'zope 2.7.8',
'zope/(zope 2.7.9-final, python 2.3.5, linux2) zserver/1.1 plone/2.0.4': 'zope 2.7.9',
'zope/(zope 2.8.0-a0, python 2.3.4, linux2) zserver/1.1 plone/2.0-rc3': 'zope 2.8.0',
'zope/(zope 2.8.2-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.2',
'zope/(zope 2.8.4-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.4',
'zope/(zope 2.8.6-final, python 2.3.5, linux2) zserver/1.1 plone/unknown': 'zope 2.8.6',
'zope/(zope 2.8.6-final, python 2.4.4, linux2) zserver/1.1 plone/unknown': 'zope 2.8.6',
'zope/(zope 2.8.7-final, python 2.4.4, linux2) zserver/1.1 plone/unknown': 'zope 2.8.7',
'zope/(zope 2.9.2-, python 2.4.3, linux2) zserver/1.1 plone/unknown': 'zope 2.9.2',
'zope/(zope 2.9.3-, python 2.4.0, linux2) zserver/1.1': 'zope 2.9.3',
'zope/(zope 2.9.3-, python 2.4.2, linux2) zserver/1.1 plone/2.5': 'zope 2.9.3',
'zope/(zope 2.9.5-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.1': 'zope 2.9.5',
'zope/(zope 2.9.6-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.1': 'zope 2.9.6',
'zope/(zope 2.9.6-final, python 2.4.3, linux2) zserver/1.1 plone/2.5.2': 'zope 2.9.6',
'zope/(zope 2.9.7-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.9.7',
'zope/(zope 2.9.8-final, python 2.4.4, linux2) zserver/1.1': 'zope 2.9.8',
'rompager/4.07 upnp/1.0': 'zyxel zywall 10w rompager 4.07',
'and-httpd/0.99.11': 'and-httpd 0.99.11',
'bozohttpd/20060517': 'bozohttpd 20060517',
'bozohttpd/20080303': 'bozohttpd 20080303',
'dwhttpd/4.0.2a7a (inso': 'dwhttpd 4.0.2a7a',
'dwhttpd/4.1a6 (inso': 'dwhttpd 4.1a6',
'dwhttpd/4.2a7 (inso': 'dwhttpd 4.2a7',
'emule': 'emule 0.48a',
'ns-firecat/1.0.x': 'firecat 1.0.0 beta',
'fnord/1.8a': 'fnord 1.8a',
'lighttpd/1.4.13': 'lighttpd 1.4.13',
'lighttpd/1.4.16': 'lighttpd 1.4.16',
'lighttpd/1.4.18': 'lighttpd 1.4.18',
'lighttpd/1.4.19': 'lighttpd 1.4.19',
'lighttpd/1.4.22': 'lighttpd 1.4.22',
'lighttpd/1.5.0': 'lighttpd 1.5.0',
'nginx/0.5.19': 'nginx 0.5.19',
'nginx/0.5.30': 'nginx 0.5.30',
'nginx/0.5.31': 'nginx 0.5.31',
'nginx/0.5.32': 'nginx 0.5.32',
'nginx/0.5.33': 'nginx 0.5.33',
'nginx/0.5.35': 'nginx 0.5.35',
'nginx/0.6.13': 'nginx 0.6.13',
'nginx/0.6.16': 'nginx 0.6.16',
'nginx/0.6.20': 'nginx 0.6.20',
'nginx/0.6.31': 'nginx 0.6.26',
'nostromo 1.9.1': 'nostromo 1.9.1',
'publicfile': 'publicfile',
'thttpd/2.19-mx apr 25 2002': 'thttpd 2.19-mx',
'thttpd/2.19-mx dec 2 2002': 'thttpd 2.19-mx',
'thttpd/2.19-mx jan 24 2006': 'thttpd 2.19-mx',
'thttpd/2.19-mx oct 20 2003': 'thttpd 2.19-mx',
'thttpd/2.23beta1 26may2002': 'thttpd 2.23beta1',
'thttpd/2.24 26oct2003': 'thttpd 2.24',
'thttpd/2.26 ??apr2004': 'thttpd 2.26',
'vqserver/1.9.56 the world\'s most friendly web server': 'vqserver 1.9.56',
'webcamxp': 'webcamxp pro 2007 3.96.000 beta',
}
# Substrings searched for inside a (lowercased) banner to guess the host OS.
windows_hints = ['microsoft', 'windows', 'win32']
mac_os_hints = ['macos']
# NOTE(review): 'solaris' and 'unix' are grouped under linux hints, so those
# systems are reported as 'linux (...)' -- verify this is intended.
linux_hints = ['suse', 'linux', 'debian', 'solaris', 'red hat', 'unix', 'ubuntu', 'centos']
# Substrings suggesting the banner is masked by a hosting/protection layer.
hosting_hints = ['host', 'hosting']
# Maps a lowercase substring of an FTP banner to the operating system that
# server typically runs on ('*' = cross-platform / indeterminate).
# NOTE(review): several keys keep a trailing space, presumably so the hint
# only matches as a whole word -- confirm before normalizing them.
# FIX: removed the duplicate 'crushftp' key the original declared twice;
# in a dict literal the second occurrence silently overwrites the first.
ftp_servers = {
    'crushftp': '*',
    'glftpd': 'unix',
    'goanywhere ': 'unix',
    'proftpd': '*',
    'pro-ftpd ': '*',
    'pure-ftpd': 'unix',
    'pureftpd': 'unix',
    'slimftpd ': 'windows',
    'slim-ftpd ': 'windows',
    'vsftpd ': 'unix',
    'wu-ftpd': 'unix',
    'wuftpd ': 'unix',
    'alftp': 'windows',
    'cerberus ': 'windows',
    'completeftp': 'windows',
    'filezilla': '*',
    'logicaldoc': '*',
    'iis': 'windows',
    'naslite': 'unix',
    'syncplify': 'windows',
    'sysax': 'windows',
    'war ftp': 'windows',
    'ws ftp': 'windows',
    'ncftpd': 'unix',
}
# Maps a lowercase substring of an SMTP banner to the operating system that
# mail server typically runs on.
# NOTE(review): the 'gws' value is 'google web services', not an OS name,
# and some products listed as 'unix' (e.g. hmailserver, slmail pro) are
# Windows-only software -- verify these entries against real banners.
smtp_servers = {
    'gws': 'google web services',
    'ncftpd': 'unix',
    'agorum': 'unix',
    'atmail': 'unix',
    'axigen': 'unix',
    'bongo': 'unix',
    'citadel': 'unix',
    'contactoffice': 'unix',
    'communigate': 'unix',
    'courier': 'unix',
    'critical path': 'unix',
    'imail': 'unix',
    'eudora': 'unix',
    'evo': 'unix',
    'exim': 'unix',
    'firstclass': 'unix',
    'gammadyne': 'unix',
    'gordano': 'unix',
    'haraka': 'unix',
    'hmailserver': 'unix',
    'ibm lotus domino': 'unix',
    'icewarp': 'unix',
    'ipswitch': 'unix',
    'ironport': 'unix',
    'james': 'unix',
    'kerio': 'unix',
    'magicmail': 'unix',
    'mailenable': 'unix',
    'mailtraq': 'unix',
    'mdaemon': 'windows',
    'mercury': 'unix',
    'meta1': 'unix',
    'microsoft': 'windows',
    'exchange': 'windows',
    'mmdf': 'unix',
    'momentum': 'unix',
    'groupwise': 'unix',
    'netmail': 'unix',
    'opensmtpd': 'unix',
    'openwave': 'unix',
    'open-xchange': 'unix',
    'beehive': 'unix',
    'oracle': 'unix',
    'port25': 'unix',
    'postfix': 'unix',
    'postmaster': 'unix',
    'qmail': 'unix',
    'qpsmtpd': 'unix',
    'scalix': 'unix',
    'sendmail': 'unix',
    'slmail pro': 'unix',
    'smail': 'unix',
    'sparkengine': 'unix',
    'smtp proxy': 'unix',
    'strongmail': 'unix',
    'sun java system': 'unix',
    'synovel collabsuite': 'unix',
    'wingate': 'windows',
    'xmail': 'unix',
    'xms': 'unix',
    'zarafa': 'unix',
    'zimbra': 'unix',
    'zmailer': 'unix',
}
| |
import tensorflow as tf
import numpy as np
import math
# =========================================================================== #
# TensorFlow implementation of Text Boxes encoding / decoding.
# =========================================================================== #
def tf_text_bboxes_encode_layer(bboxes,
anchors_layer, num,
matching_threshold=0.5,
prior_scaling=[0.1, 0.1, 0.2, 0.2],
dtype=tf.float32):
"""
Encode groundtruth labels and bounding boxes using Textbox anchors from
one layer.
Arguments:
bboxes: Nx4 Tensor(float) with bboxes relative coordinates;
anchors_layer: Numpy array with layer anchors;
matching_threshold: Threshold for positive match with groundtruth bboxes;
prior_scaling: Scaling of encoded coordinates.
Return:
(target_localizations, target_scores): Target Tensors.
# this is a binary problem, so target_scores and target_labels are the same.
"""
# Anchors coordinates and volume.
yref, xref, href, wref = anchors_layer
ymin = yref - href / 2.
xmin = xref - wref / 2.
ymax = yref + href / 2.
xmax = xref + wref / 2.
vol_anchors = (xmax - xmin) * (ymax - ymin)
# Initialize tensors...
shape = (yref.shape[0], yref.shape[1], yref.shape[2], href.size)
# all follow the shape(feat.size, feat.size, 2, 6)
#feat_labels = tf.zeros(shape, dtype=tf.int64)
feat_scores = tf.zeros(shape, dtype=dtype)
feat_ymin = tf.zeros(shape, dtype=dtype)
feat_xmin = tf.zeros(shape, dtype=dtype)
feat_ymax = tf.ones(shape, dtype=dtype)
feat_xmax = tf.ones(shape, dtype=dtype)
def jaccard_with_anchors(bbox):
"""
Compute jaccard score between a box and the anchors.
"""
int_ymin = tf.maximum(ymin, bbox[0])
int_xmin = tf.maximum(xmin, bbox[1])
int_ymax = tf.minimum(ymax, bbox[2])
int_xmax = tf.minimum(xmax, bbox[3])
h = tf.maximum(int_ymax - int_ymin, 0.)
w = tf.maximum(int_xmax - int_xmin, 0.)
# Volumes.
inter_vol = h * w
union_vol = vol_anchors - inter_vol \
+ (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
jaccard = tf.div(inter_vol, union_vol)
return jaccard
"""
# never use in Textbox
def intersection_with_anchors(bbox):
'''
Compute intersection between score a box and the anchors.
'''
int_ymin = tf.maximum(ymin, bbox[0])
int_xmin = tf.maximum(xmin, bbox[1])
int_ymax = tf.minimum(ymax, bbox[2])
int_xmax = tf.minimum(xmax, bbox[3])
h = tf.maximum(int_ymax - int_ymin, 0.)
w = tf.maximum(int_xmax - int_xmin, 0.)
inter_vol = h * w
scores = tf.div(inter_vol, vol_anchors)
return scores
"""
def condition(i, feat_scores,
feat_ymin, feat_xmin, feat_ymax, feat_xmax):
"""Condition: check label index.
"""
#r = tf.less(i, tf.shape(bboxes)[0])
r = tf.less(i, num)
return r
def body(i, feat_scores,feat_ymin, feat_xmin, feat_ymax, feat_xmax):
"""Body: update feature labels, scores and bboxes.
Follow the original SSD paper for that purpose:
- assign values when jaccard > 0.5;
- only update if beat the score of other bboxes.
"""
# Jaccard score.
bbox = bboxes[i]
jaccard = jaccard_with_anchors(bbox)
# Mask: check threshold + scores + no annotations + num_classes.
mask = tf.greater(jaccard, feat_scores)
mask = tf.logical_and(mask, tf.greater(jaccard, matching_threshold))
#mask = tf.logical_and(mask, feat_scores > -0.5)
#mask = tf.logical_and(mask, label < num_classes)
imask = tf.cast(mask, tf.int64)
fmask = tf.cast(mask, dtype)
# Update values using mask.
#feat_labels = imask * label + (1 - imask) * feat_labels
feat_scores = tf.where(mask, jaccard, feat_scores)
feat_ymin = fmask * bbox[0] + (1 - fmask) * feat_ymin
feat_xmin = fmask * bbox[1] + (1 - fmask) * feat_xmin
feat_ymax = fmask * bbox[2] + (1 - fmask) * feat_ymax
feat_xmax = fmask * bbox[3] + (1 - fmask) * feat_xmax
# Check no annotation label: ignore these anchors...
#interscts = intersection_with_anchors(bbox)
#mask = tf.logical_and(interscts > ignore_threshold,
# label == no_annotation_label)
# Replace scores by -1.
#feat_scores = tf.where(mask, -tf.cast(mask, dtype), feat_scores)
return [i+1, feat_scores,
feat_ymin, feat_xmin, feat_ymax, feat_xmax]
# Main loop definition.
i = 0
[i,feat_scores,
feat_ymin, feat_xmin,
feat_ymax, feat_xmax] = tf.while_loop(condition, body,
[i, feat_scores,
feat_ymin, feat_xmin,
feat_ymax, feat_xmax])
'''
for i, bbox in enumerate(tf.unpack(bboxes, axis=0)):
[i,feat_scores,feat_ymin,
feat_xmin, feat_ymax, feat_xmax] = body(i, feat_scores,
feat_ymin, feat_xmin,
feat_ymax, feat_xmax,bbox)
'''
# Transform to center / size.
feat_cy = (feat_ymax + feat_ymin) / 2.
feat_cx = (feat_xmax + feat_xmin) / 2.
feat_h = feat_ymax - feat_ymin
feat_w = feat_xmax - feat_xmin
# Encode features.
feat_cy = (feat_cy - yref) / href / prior_scaling[0]
feat_cx = (feat_cx - xref) / wref / prior_scaling[1]
feat_h = tf.log(feat_h / href) / prior_scaling[2]
feat_w = tf.log(feat_w / wref) / prior_scaling[3]
# Use SSD ordering: x / y / w / h instead of ours.
feat_localizations = tf.stack([feat_cx, feat_cy, feat_w, feat_h], axis=-1)
return feat_localizations, feat_scores
def tf_text_bboxes_encode(bboxes,
                          anchors, num,
                          matching_threshold=0.5,
                          prior_scaling=[0.1, 0.1, 0.2, 0.2],
                          dtype=tf.float32,
                          scope='text_bboxes_encode'):
    """Encode groundtruth labels and bounding boxes using SSD net anchors.
    Encoding boxes for all feature layers.
    Arguments:
      bboxes: Nx4 Tensor(float) with bboxes relative coordinates;
      anchors: List of Numpy arrays with per-layer anchors;
      num: number of groundtruth boxes (loop bound in the per-layer encoder);
      matching_threshold: Threshold for positive match with groundtruth bboxes;
      prior_scaling: Scaling of encoded coordinates;
      dtype: dtype of the produced target tensors;
      scope: TensorFlow name scope wrapping the encoding ops.
    Return:
      (target_localizations, target_scores): each a list with one Tensor per
      feature layer.
    """
    # FIX: honor the `scope` argument — it was previously ignored and the
    # literal 'text_bboxes_encode' was always used. The default value is the
    # same string, so existing callers see identical graphs.
    with tf.name_scope(scope):
        target_localizations = []
        target_scores = []
        for i, anchors_layer in enumerate(anchors):
            with tf.name_scope('bboxes_encode_block_%i' % i):
                t_loc, t_scores = \
                    tf_text_bboxes_encode_layer(bboxes, anchors_layer, num,
                                                matching_threshold,
                                                prior_scaling, dtype)
                target_localizations.append(t_loc)
                target_scores.append(t_scores)
        return target_localizations, target_scores
## produce anchor for one layer
# each feature point has 12 default textboxes(6 boxes + 6 offsets boxes)
# aspect ratios = (1,2,3,5,7,10)
# feat_size :
# conv4_3 ==> 38 x 38
# fc7 ==> 19 x 19
# conv6_2 ==> 10 x 10
# conv7_2 ==> 5 x 5
# conv8_2 ==> 3 x 3
# pool6 ==> 1 x 1
def textbox_anchor_one_layer(img_shape,
                             feat_size,
                             ratios,
                             scale,
                             offset = 0.5,
                             dtype=np.float32):
    """Compute TextBoxes anchor centers and sizes for one feature layer.

    Follows the paper's scheme: 12 anchors per cell (6 aspect ratios, each
    with a vertically-offset twin), without the sk' = sqrt(sk * sk+1) box.

    Arguments:
      img_shape: (H, W) of the input image (currently unused; kept for API
        symmetry with the per-layer callers);
      feat_size: (rows, cols) of the feature map;
      ratios: iterable of aspect ratios (expected length 6);
      scale: anchor scale for this layer (relative coordinate units);
      offset: vertical shift (in cells) of the second anchor grid;
      dtype: numpy dtype of the returned arrays.
    Returns:
      (y_out, x_out, h, w) where y_out/x_out have shape
      (rows, cols, 2, 1) — the trailing axes are (plain/offset grid, ratio
      broadcast dim) — and h/w have shape (6,).
    """
    # Anchor centers: one grid at cell centers (+0.5), plus a grid shifted
    # down by `offset` cells (the TextBoxes vertical-offset anchors).
    y, x = np.mgrid[0:feat_size[0], 0:feat_size[1]] + 0.5
    y_offset = y + offset
    y = y.astype(dtype) / feat_size[0]
    x = x.astype(dtype) / feat_size[1]
    x_offset = x
    # BUGFIX: normalize the vertical offset grid by the *vertical* feature
    # size (was feat_size[1]); identical result for square feature maps.
    y_offset = y_offset.astype(dtype) / feat_size[0]
    x_out = np.stack((x, x_offset), -1)
    y_out = np.stack((y, y_offset), -1)
    y_out = np.expand_dims(y_out, axis=-1)
    x_out = np.expand_dims(x_out, axis=-1)
    # One (h, w) pair per aspect ratio; area is scale**2 for every ratio.
    num_anchors = 6
    h = np.zeros((num_anchors, ), dtype=dtype)
    w = np.zeros((num_anchors, ), dtype=dtype)
    for i, r in enumerate(ratios):
        h[i] = scale / math.sqrt(r)
        w[i] = scale * math.sqrt(r)
    return y_out, x_out, h, w
## produce anchor for all layers
def textbox_achor_all_layers(img_shape,
                             layers_shape,
                             anchor_ratios,
                             scales,
                             offset=0.5,
                             dtype=np.float32):
    """
    Compute anchor boxes for every feature layer of the network.

    Returns a list with one (y, x, h, w) anchor tuple per entry in
    `layers_shape`, pairing each layer shape with its scale.
    """
    return [textbox_anchor_one_layer(img_shape, layer_shape,
                                     anchor_ratios, scales[layer_idx],
                                     offset=offset, dtype=dtype)
            for layer_idx, layer_shape in enumerate(layers_shape)]
if __name__ == "__main__":
    # Smoke test: build anchors for a single 38x38 layer and derive the
    # corner coordinates / areas the encoder would use.
    # NOTE: only scales[0] is exercised here; the full list mirrors the
    # per-layer setup documented above.
    scales = [0.2, 0.34, 0.48, 0.62, 0.76, 0.90]
    y_out, x_out, h, w = textbox_anchor_one_layer((300, 300), (38,38), (1,2,3,5,7,10), scale=0.2)
    print y_out.shape, x_out.shape, h.shape, w.shape
    ymin = y_out - h / 2.
    print ymin.shape
    yref, xref, href, wref = y_out, x_out, h, w
    # Convert center/size anchors to corner coordinates; h/w broadcast over
    # the per-ratio axis, so the corner arrays gain a size-6 trailing dim.
    ymin = yref - href / 2.
    xmin = xref - wref / 2.
    ymax = yref + href / 2.
    xmax = xref + wref / 2.
    vol_anchors = (xmax - xmin) * (ymax - ymin)
    print href.size
| |
###############################################################################
# actionAngle: a Python module to calculate actions, angles, and frequencies
#
# class: actionAngleIsochrone
#
# Calculate actions-angle coordinates for the Isochrone potential
#
# methods:
# __call__: returns (jr,lz,jz)
# actionsFreqs: returns (jr,lz,jz,Or,Op,Oz)
# actionsFreqsAngles: returns (jr,lz,jz,Or,Op,Oz,ar,ap,az)
#
###############################################################################
import copy
import warnings
import numpy
from .actionAngle import actionAngle
from ..potential import IsochronePotential
from ..util import galpyWarning, conversion
class actionAngleIsochrone(actionAngle):
    """Action-angle formalism for the isochrone potential, on the Jphi, Jtheta system of Binney & Tremaine (2008)"""
    def __init__(self,*args,**kwargs):
        """
        NAME:
           __init__
        PURPOSE:
           initialize an actionAngleIsochrone object
        INPUT:
           Either:
              b= scale parameter of the isochrone parameter (can be Quantity)
              ip= instance of a IsochronePotential
           ro= distance from vantage point to GC (kpc; can be Quantity)
           vo= circular velocity at ro (km/s; can be Quantity)
        OUTPUT:
           instance
        HISTORY:
           2013-09-08 - Written - Bovy (IAS)
        """
        actionAngle.__init__(self,
                             ro=kwargs.get('ro',None),vo=kwargs.get('vo',None))
        if not 'b' in kwargs and not 'ip' in kwargs: #pragma: no cover
            raise IOError("Must specify b= for actionAngleIsochrone")
        if 'ip' in kwargs:
            # Take scale and amplitude from the supplied potential instance
            ip= kwargs['ip']
            if not isinstance(ip,IsochronePotential): #pragma: no cover
                raise IOError("'Provided ip= does not appear to be an instance of an IsochronePotential")
            # Check the units
            self._pot= ip
            self._check_consistent_units()
            self.b= ip.b
            self.amp= ip._amp
        else:
            self.b= conversion.parse_length(kwargs['b'],ro=self._ro)
            rb= numpy.sqrt(self.b**2.+1.)
            self.amp= (self.b+rb)**2.*rb
        # NOTE: ext_loaded is hard-coded False, so the C branch below is
        # currently unreachable and the pure-Python path is always used.
        self._c= False
        ext_loaded= False
        if ext_loaded and (('c' in kwargs and kwargs['c'])
                           or not 'c' in kwargs): #pragma: no cover
            self._c= True
        else:
            self._c= False
        if not self._c:
            self._ip= IsochronePotential(amp=self.amp,b=self.b)
        #Define _pot, because some functions that use actionAngle instances need this
        self._pot= IsochronePotential(amp=self.amp,b=self.b)
        # Check the units
        self._check_consistent_units()
        return None
    def _evaluate(self,*args,**kwargs):
        """
        NAME:
           __call__ (_evaluate)
        PURPOSE:
           evaluate the actions (jr,lz,jz)
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
        OUTPUT:
           (jr,lz,jz)
        HISTORY:
           2013-09-08 - Written - Bovy (IAS)
        """
        if len(args) == 5: #R,vR.vT, z, vz
            R,vR,vT, z, vz= args
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
        # Promote scalar input to length-1 arrays so the array math below
        # works uniformly
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
        if self._c: #pragma: no cover
            pass
        else:
            # Angular-momentum components, total L^2, and energy
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            E= self._ip(R,z)+vR**2./2.+vT**2./2.+vz**2./2.
            L= numpy.sqrt(L2)
            #Actions
            Jphi= Lz
            Jz= L-numpy.fabs(Lz)
            # Radial action for the isochrone potential (closed form)
            Jr= self.amp/numpy.sqrt(-2.*E)\
                -0.5*(L+numpy.sqrt((L2+4.*self.amp*self.b)))
            return (Jr,Jphi,Jz)
    def _actionsFreqs(self,*args,**kwargs):
        """
        NAME:
           actionsFreqs (_actionsFreqs)
        PURPOSE:
           evaluate the actions and frequencies (jr,lz,jz,Omegar,Omegaphi,Omegaz)
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
        OUTPUT:
            (jr,lz,jz,Omegar,Omegaphi,Omegaz)
        HISTORY:
           2013-09-08 - Written - Bovy (IAS)
        """
        if len(args) == 5: #R,vR.vT, z, vz
            R,vR,vT, z, vz= args
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
        if self._c: #pragma: no cover
            pass
        else:
            # Same action computation as _evaluate, plus frequencies
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            E= self._ip(R,z)+vR**2./2.+vT**2./2.+vz**2./2.
            L= numpy.sqrt(L2)
            #Actions
            Jphi= Lz
            Jz= L-numpy.fabs(Lz)
            Jr= self.amp/numpy.sqrt(-2.*E)\
                -0.5*(L+numpy.sqrt((L2+4.*self.amp*self.b)))
            #Frequencies
            Omegar= (-2.*E)**1.5/self.amp
            Omegaz= 0.5*(1.+L/numpy.sqrt(L2+4.*self.amp*self.b))*Omegar
            Omegaphi= copy.copy(Omegaz)
            # Retrograde orbits (Lz < 0) circulate in the opposite sense
            indx= Lz < 0.
            Omegaphi[indx]*= -1.
            return (Jr,Jphi,Jz,Omegar,Omegaphi,Omegaz)
    def _actionsFreqsAngles(self,*args,**kwargs):
        """
        NAME:
           actionsFreqsAngles (_actionsFreqsAngles)
        PURPOSE:
           evaluate the actions, frequencies, and angles (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
        OUTPUT:
            (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
        HISTORY:
           2013-09-08 - Written - Bovy (IAS)
        """
        if len(args) == 5: #R,vR.vT, z, vz pragma: no cover
            raise IOError("You need to provide phi when calculating angles")
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
            phi= self._eval_phi
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
            phi= numpy.array([phi])
        if self._c: #pragma: no cover
            pass
        else:
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            E= self._ip(R,z)+vR**2./2.+vT**2./2.+vz**2./2.
            L= numpy.sqrt(L2)
            #Actions
            Jphi= Lz
            Jz= L-numpy.fabs(Lz)
            Jr= self.amp/numpy.sqrt(-2.*E)\
                -0.5*(L+numpy.sqrt((L2+4.*self.amp*self.b)))
            #Frequencies
            Omegar= (-2.*E)**1.5/self.amp
            Omegaz= 0.5*(1.+L/numpy.sqrt(L2+4.*self.amp*self.b))*Omegar
            Omegaphi= copy.copy(Omegaz)
            indx= Lz < 0.
            Omegaphi[indx]*= -1.
            #Angles
            c= -self.amp/2./E-self.b
            e2= 1.-L2/self.amp/c*(1.+self.b/c)
            e= numpy.sqrt(e2)
            if self.b == 0.:
                coseta= 1/e*(1.-numpy.sqrt(R**2.+z**2.)/c)
            else:
                s= 1.+numpy.sqrt(1.+(R**2.+z**2.)/self.b**2.)
                coseta= 1/e*(1.-self.b/c*(s-2.))
            # Clamp to [-1, 1] to guard against round-off before arccos
            pindx= (coseta > 1.)
            coseta[pindx]= 1.
            pindx= (coseta < -1.)
            coseta[pindx]= -1.
            eta= numpy.arccos(coseta)
            costheta= z/numpy.sqrt(R**2.+z**2.)
            sintheta= R/numpy.sqrt(R**2.+z**2.)
            # Inward-moving points lie on the second half of the radial cycle
            vrindx= (vR*sintheta+vz*costheta) < 0.
            eta[vrindx]= 2.*numpy.pi-eta[vrindx]
            angler= eta-e*c/(c+self.b)*numpy.sin(eta)
            tan11= numpy.arctan(numpy.sqrt((1.+e)/(1.-e))*numpy.tan(0.5*eta))
            tan12= numpy.arctan(numpy.sqrt((1.+e+2.*self.b/c)/(1.-e+2.*self.b/c))*numpy.tan(0.5*eta))
            vzindx= (-vz*sintheta+vR*costheta) > 0.
            tan11[tan11 < 0.]+= numpy.pi
            tan12[tan12 < 0.]+= numpy.pi
            # Clamp Lz/L to [-1, 1] before deriving the inclination
            pindx= (Lz/L > 1.)
            Lz[pindx]= L[pindx]
            pindx= (Lz/L < -1.)
            Lz[pindx]= -L[pindx]
            sini= numpy.sqrt(L**2.-Lz**2.)/L
            tani= numpy.sqrt(L**2.-Lz**2.)/Lz
            sinpsi= costheta/sini
            # Only clamp finite entries; NaNs (sini == 0) are handled below
            pindx= (sinpsi > 1.)*numpy.isfinite(sinpsi)
            sinpsi[pindx]= 1.
            pindx= (sinpsi < -1.)*numpy.isfinite(sinpsi)
            sinpsi[pindx]= -1.
            psi= numpy.arcsin(sinpsi)
            psi[vzindx]= numpy.pi-psi[vzindx]
            # For non-inclined orbits, we set Omega=0 by convention
            psi[True^numpy.isfinite(psi)]= phi[True^numpy.isfinite(psi)]
            psi= psi % (2.*numpy.pi)
            anglez= psi+Omegaz/Omegar*angler\
                -tan11-1./numpy.sqrt(1.+4*self.amp*self.b/L2)*tan12
            sinu= z/R/tani
            pindx= (sinu > 1.)*numpy.isfinite(sinu)
            sinu[pindx]= 1.
            pindx= (sinu < -1.)*numpy.isfinite(sinu)
            sinu[pindx]= -1.
            u= numpy.arcsin(sinu)
            u[vzindx]= numpy.pi-u[vzindx]
            # For non-inclined orbits, we set Omega=0 by convention
            u[True^numpy.isfinite(u)]= phi[True^numpy.isfinite(u)]
            Omega= phi-u
            anglephi= Omega
            # Retrograde orbits (indx) advance anglephi in the opposite sense
            anglephi[indx]-= anglez[indx]
            anglephi[True^indx]+= anglez[True^indx]
            # Wrap all angles into [0, 2pi)
            angler= angler % (2.*numpy.pi)
            anglephi= anglephi % (2.*numpy.pi)
            anglez= anglez % (2.*numpy.pi)
            return (Jr,Jphi,Jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)
    def _EccZmaxRperiRap(self,*args,**kwargs):
        """
        NAME:
           _EccZmaxRperiRap
        PURPOSE:
           evaluate the eccentricity, maximum height above the plane, peri- and apocenter for an isochrone potential
        INPUT:
           Either:
              a) R,vR,vT,z,vz[,phi]:
                 1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
                 2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
              b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
        OUTPUT:
           (e,zmax,rperi,rap)
        HISTORY:
           2017-12-22 - Written - Bovy (UofT)
        """
        if len(args) == 5: #R,vR.vT, z, vz pragma: no cover
            R,vR,vT, z, vz= args
        elif len(args) == 6: #R,vR.vT, z, vz, phi
            R,vR,vT, z, vz, phi= args
        else:
            self._parse_eval_args(*args)
            R= self._eval_R
            vR= self._eval_vR
            vT= self._eval_vT
            z= self._eval_z
            vz= self._eval_vz
        if isinstance(R,float):
            R= numpy.array([R])
            vR= numpy.array([vR])
            vT= numpy.array([vT])
            z= numpy.array([z])
            vz= numpy.array([vz])
        if self._c: #pragma: no cover
            pass
        else:
            Lz= R*vT
            Lx= -z*vT
            Ly= z*vR-R*vz
            L2= Lx*Lx+Ly*Ly+Lz*Lz
            E= self._ip(R,z)+vR**2./2.+vT**2./2.+vz**2./2.
            if self.b == 0:
                warnings.warn("zmax for point-mass (b=0) isochrone potential is only approximate, because it assumes that zmax is attained at rap, which is not necessarily the case",galpyWarning)
                # b=0 reduces to the Kepler problem: closed-form ellipse
                a= -self.amp/2./E
                me2= L2/self.amp/a
                e= numpy.sqrt(1.-me2)
                rperi= a*(1.-e)
                rap= a*(1.+e)
            else:
                # Turning points in the auxiliary variable
                # s = 1 + sqrt(1 + r^2/b^2), then mapped back to radius
                smin= 0.5*((2.*E-self.amp/self.b)\
                               +numpy.sqrt((2.*E-self.amp/self.b)**2.
                                           +2.*E*(4.*self.amp/self.b+L2/self.b**2.)))/E
                smax= 2.-self.amp/E/self.b-smin
                rperi= smin*numpy.sqrt(1.-2./smin)*self.b
                rap= smax*numpy.sqrt(1.-2./smax)*self.b
            return ((rap-rperi)/(rap+rperi),rap*numpy.sqrt(1.-Lz**2./L2),
                    rperi,rap)
| |
from traceback import print_exc
from collections import OrderedDict, Mapping
from decimal import Decimal
import sqlparse
import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
from dateutil import parser as dateutil
import progressbar as pb # import ProgressBar, Bar, Percentage, ETA, RotatingMarker
from django.core.exceptions import ImproperlyConfigured
from django.db import DatabaseError, transaction
from django.core.exceptions import FieldError
from django.db.models import FieldDoesNotExist
from django.db.models import Model
from pug.dj import db as djdb # FIXME: confusing name (too similar to common `import as` for django.db)
from pug.nlp import db # FIXME: too similar to pug.db
from pug.nlp import util
import sqlserver as sql
DEFAULT_DB_ALIAS = None # 'default'
DEFAULT_APP_NAME = None
try:
from django.db import models, connection, connections, router
from django.conf import settings
DEFAULT_APP_NAME = settings.INSTALLED_APPS[-1].split('.')[-1]
except:
import traceback
print traceback.format_exc()
print 'WARNING: The module named %r from file %r' % (__name__, __file__)
print ' can only be used within a Django project!'
print ' Though the module was imported, some of its functions may raise exceptions.'
# MS-SQL column-type groups used to decide which statistics are safe to
# compute for a column (see augment_field_meta below).
types_varchar = ['nvarchar', 'varchar', 'sysname']  # `sysname` stores MS-TSQL object names and is equivalent to `nvarchar(128)`
types_not_countable = ['text', 'image', 'ntext']
types_not_aggregatable = types_not_countable + ['bit', 'uniqueidentifier']
def get_app_meta(apps=None, app_filter=lambda x: x.startswith('sec_') or x.startswith('siica_'), app_exclude_filter=None, verbosity=0, save=True):
    """Collect database metadata for each app, optionally dumping JSON files.

    Returns a list with one metadata dict (from get_db_meta) per accepted app.
    When `save` is truthy, each app's metadata is written to
    'db_meta_<app>.json' and the combined list to 'db_meta_all_apps.json';
    write failures are logged and ignored (best-effort).
    """
    apps = util.listify(apps or djdb.get_app(apps))
    meta = []
    for app in apps:
        if filter_reject(app, app_filter, app_exclude_filter):
            continue
        meta.append(get_db_meta(app=app, verbosity=verbosity))
        if save:
            # Best-effort per-app dump; never abort the collection loop.
            try:
                with open('db_meta_%s.json' % app, 'w') as fpout:
                    json.dump(make_serializable(meta[-1]), fpout, indent=4)
            except:
                print_exc()
    if save:
        # Best-effort combined dump of everything collected above.
        try:
            with open('db_meta_all_apps.json', 'w') as fpout:
                json.dump(make_serializable(meta), fpout, indent=4)
        except:
            print_exc()
    return meta
def filter_reject(s, accept_filters, reject_filters=None):
    """Return True if `s` should be skipped, False to keep it, None if undecidable.

    `accept_filters`/`reject_filters` may each be a predicate callable, a
    string prefix, or a tuple/list/set of either; collections are expanded
    recursively and `s` is rejected if ANY combination rejects it.

    NOTE(review): the first branch requires BOTH filters to be callable, so
    a callable accept filter with reject_filters=None falls through to
    `return None` (falsy -> item kept) — confirm this is intended.
    """
    if callable(accept_filters) and callable(reject_filters):
        # Reject when the accept filter does not match s and the reject
        # filter does not veto the rejection.
        if (accept_filters and not accept_filters(s)) and (not reject_filters or not reject_filters(s)):
            return True
        else:
            return False
    elif isinstance(accept_filters, (tuple, list, set)) and (not reject_filters or callable(reject_filters)):
        return any(filter_reject(s, af, reject_filters) for af in accept_filters)
    elif (not accept_filters or callable(accept_filters)) and isinstance(reject_filters, (tuple, list, set)):
        return any(filter_reject(s, accept_filters, rf) for rf in reject_filters)
    # FIX: removed an unreachable branch that duplicated the
    # sequence-of-accept-filters case above (same conjunction, reordered).
    elif isinstance(accept_filters, basestring) and (not reject_filters or callable(reject_filters)):
        # String filters mean "starts with this prefix".
        return filter_reject(s, lambda x: x.startswith(accept_filters), reject_filters)
    elif (not accept_filters or callable(accept_filters)) and isinstance(reject_filters, basestring):
        return filter_reject(s, accept_filters, lambda x: x.startswith(reject_filters))
    return None
def load_app_meta(apps=None, app_filter=['sec_', 'siica_'], app_exclude_filter=None):
    """Load previously saved per-app metadata JSON files (see get_app_meta).

    Returns a flat dict keyed by 'app.table.field' -> field metadata dict.
    `app_filter` is a list of accepted app-name prefixes;
    `app_exclude_filter`, if given, is a list of rejected prefixes.
    Raises IOError if an expected 'db_meta_<app>.json' file is missing.
    """
    apps = apps or djdb.get_app(apps)
    meta = {}
    for app in apps:
        # if filter_reject(app, app_filter, app_exclude_filter):
        #     continue
        # Inlined prefix filtering (the commented filter_reject call above is
        # the generic equivalent).
        if (app_filter and not any(app.startswith(af) for af in app_filter)) and (
                not app_exclude_filter or not any(app.startswith(rf) for rf in app_exclude_filter)):
            continue
        with open('db_meta_%s.json' % app, 'r') as fpin:
            m = json.load(fpin)
        # Flatten the nested {table: {field: meta}} structure into
        # dotted 'app.table.field' keys.
        for table_name, table_meta in m.iteritems():
            table_name = app + '.' + table_name
            for field_name, field_meta in table_meta.iteritems():
                meta[table_name + '.' + field_name] = field_meta
    return meta
def meta_bar_chart(series=None, N=20):
    "Each column in the series is a dict of dicts"
    # Plot a log-scale bar chart of the top-N most frequent keys in
    # series['# Received'] alongside series['Qty in RA'].
    if not series or isinstance(series, basestring):
        # NOTE(review): json.load expects a file-like object but is handed
        # the load_app_meta function itself — this path looks broken;
        # probably meant load_app_meta() or json.load(open(series)).
        series = json.load(load_app_meta)
    if isinstance(series, Mapping) and isinstance(series.values()[0], Mapping):
        rows_received = series['# Received'].items()
    elif isinstance(series, Mapping):
        rows_received = series.items()
    else:
        rows_received = list(series)
    #rows = sorted(rows, key=operator.itemgetter(1), reverse=True)
    # Sort (key, count) pairs by count, descending
    rows = sorted(rows_received, key=lambda x: x[1], reverse=True)
    received_names, received_qty = zip(*rows)
    # Align the 'Qty in RA' series with the received-names order,
    # defaulting missing/None entries to 0.
    ra_qty = [(series['Qty in RA'].get(name, 0.) or 0.) for name in received_names]
    # percent = [100. - 100. * (num or 0.) / (den or 1.) for num, den in zip(received_qty, ra_qty)]
    # only care about the top 30 model numbers in terms of quantity
    #ind = range(N)
    figs = []
    figs += [plt.figure()]
    ax = figs[-1].add_subplot(111)
    ax.set_ylabel('# Units Returned')
    ax.set_title('Most-Returned LCDTV Models 2013-present')
    x = np.arange(N)
    # Two interleaved bar groups (width .4 each), log-scale y axis
    bars1 = ax.bar(x, received_qty[:N], color='b', width=.4, log=1)
    bars2 = ax.bar(x+.4, ra_qty[:N], color='g', width=.4, log=1)
    ax.set_xticks(range(N))
    ax.set_xticklabels(received_names[:N], rotation=35)
    ax.grid(True)
    ax.legend((bars1[0], bars2[0]), ('# in RA', '# Received'), 'center right')
    figs[-1].show()
    #fig.autofmt_xdate()
def get_db_meta(app=DEFAULT_APP_NAME, db_alias=None, table=None, verbosity=0, column=None):
    """Return a dict of dicts containing metadata about the database tables associated with an app
    TODO: allow multiple apps
    >>> get_db_meta('crawler', db_alias='default', table='crawler_wikiitem')  # doctest: +ELLIPSIS
    OrderedDict([('WikiItem', OrderedDict([('Meta', OrderedDict([('primary_key', None), ('count', 1332), ('db_table', u'crawler_wikiitem')])), (u'id', OrderedDict([('name', u'id'), ('type', ...
    """
    if verbosity > 0:
        print 'Looking for app %r.' % (app, )
    if app and isinstance(app, basestring):
        app = djdb.get_app(app, verbosity=verbosity)
    else:
        app = djdb.get_app('')
    model_names = list(mc.__name__ for mc in models.get_models(app))
    if verbosity > 0:
        print 'Found %d models for app %r.' % (len(model_names), app)
    meta = OrderedDict()
    # inspectdb uses: for table_name in connection.introspection.table_names(cursor):
    for model_name in model_names:
        model = djdb.get_model(model_name, app=app)
        # Resolve which database this model should be read from
        if db_alias:
            model_db_alias = db_alias
        else:
            model_db_alias = router.db_for_read(model)
        queryset = model.objects
        if model_db_alias:
            queryset = queryset.using(model_db_alias)
        # `table` may be an exact db_table name or a predicate callable;
        # models that don't match are skipped.
        if model and table is not None and isinstance(table, basestring):
            if model._meta.db_table != table:
                if verbosity>1:
                    print 'Skipped model named %s with db table names %s.' % (model_name, model._meta.db_table)
                continue
        elif callable(table):
            if not table(model._meta.db_table):
                if verbosity>1:
                    print 'Skipped model named %s with db table names %s.' % (model_name, model._meta.db_table)
                continue
        # Row count is best-effort: a DatabaseError leaves count = None
        count = None
        try:
            if verbosity > 1:
                print 'Trying to count records in model %r and db_alias %r' % (model, model_db_alias)
            count = queryset.count()
        except DatabaseError as e:
            if verbosity > 0:
                print_exc()
                print "DatabaseError: Unable to count records for model '%s' (%s) because of %s." % (model.__name__, repr(model), e)
            # Reset the aborted transaction so later queries can proceed
            transaction.rollback()
        except:
            print_exc()
            print 'Connection doesnt exist?'
        meta[model_name] = OrderedDict()
        meta[model_name]['Meta'] = OrderedDict()
        meta[model_name]['Meta']['primary_key'] = None
        meta[model_name]['Meta']['count'] = count
        meta[model_name]['Meta']['db_table'] = model._meta.db_table
        if verbosity > 1:
            print '%s.Meta = %r' % (model_name, meta[model_name]['Meta'])
        # inspectdb uses: connection.introspection.get_table_description(cursor, table_name)
        properties_of_fields = sql.get_meta_dicts(cursor=model_db_alias, table=meta[model_name]['Meta']['db_table'], verbosity=verbosity)
        model_meta = OrderedDict((field['name'], field) for field in properties_of_fields)
        if verbosity > 1:
            print '-' * 20 + model_name + '-' * 20
        # Record the primary key only when the database reports exactly one
        db_primary_keys = [field['name'] for field in properties_of_fields if field['primary_key']]
        if len(db_primary_keys) == 1:
            meta[model_name]['Meta']['primary_key'] = db_primary_keys[0]
        # augment model_meta with additional stats, but only if there are enough rows to get statistics
        model_meta = augment_model_meta(model, model_db_alias, model_meta, column_name_filters=column, count=count, verbosity=verbosity)
        if verbosity > 1:
            print model_meta
        meta[model_name].update(model_meta)
    return meta
def get_model_meta(model, app=DEFAULT_APP_NAME, db_alias=None, column_name_filter=None, verbosity=0):
    """Build metadata for a single model (single-model variant of get_db_meta).

    NOTE(review): this function has no return statement, so it always
    returns None and the `meta`/`model_meta` dicts it builds are discarded
    — it likely should end with `return meta` (or merge model_meta into
    meta first, as get_db_meta does). Confirm intent before relying on it.
    """
    if settings.DEBUG and verbosity > 1:
        print
        print '*'*100
        print 'get_model_meta'
        print
    model = djdb.get_model(model, app=app)
    # NOTE(review): `model._meta.name` — verify this attribute exists for
    # the Django version in use; also, `meta` is keyed by model.__name__
    # below but read back via `model_name`, which will KeyError whenever
    # the two differ.
    model_name = model._meta.name
    queryset = djdb.get_queryset(model, db_alias=db_alias)
    db_alias = db_alias or router.db_for_read(model)
    meta, count = {}, None
    # Row count is best-effort: a DatabaseError leaves count = None
    try:
        if verbosity > 1:
            print 'Trying to count records in model %r and db_alias %r' % (model, db_alias)
        count = queryset.count()
    except DatabaseError as e:
        if verbosity > 0:
            print_exc()
            print "DatabaseError: Unable to count records for model '%s' (%s) because of %s." % (model.__name__, repr(model), e)
        transaction.rollback()
    except:
        print_exc()
        print 'Connection doesnt exist?'
    meta[model.__name__] = OrderedDict()
    meta[model.__name__]['Meta'] = OrderedDict()
    meta[model.__name__]['Meta']['primary_key'] = None
    meta[model.__name__]['Meta']['count'] = count
    meta[model.__name__]['Meta']['db_table'] = model._meta.db_table
    if verbosity > 1:
        print '%s.Meta = %r' % (model.__name__, meta[model.__name__]['Meta'])
    # inspectdb uses: connection.introspection.get_table_description(cursor, table_name)
    properties_of_fields = sql.get_meta_dicts(cursor=db_alias, table=meta[model_name]['Meta']['db_table'], verbosity=verbosity)
    model_meta = OrderedDict((field['name'], field) for field in properties_of_fields)
    if verbosity > 1:
        print '-' * 20 + model_name + '-' * 20
    db_primary_keys = [field['name'] for field in properties_of_fields if field['primary_key']]
    if len(db_primary_keys) == 1:
        meta[model_name]['Meta']['primary_key'] = db_primary_keys[0]
    # augment model_meta with additional stats, but only if there are enough rows to get statistics
    model_meta = augment_model_meta(model, db_alias, model_meta, column_name_filter=column_name_filter, count=count, verbosity=verbosity)
def augment_model_meta(model, db_alias, model_meta, column_name_filters=None, count=0, verbosity=0):
    """Fields are keyed by their db_column name rather than field name (like model_meta)"""
    # For each concrete model field, locate its db column entry in
    # model_meta and augment it with statistics via augment_field_meta.
    if settings.DEBUG and verbosity > 2:
        print 'Augmenting model meta data for %r...' % model
    column_name_filters = util.listify(column_name_filters)
    queryset = djdb.get_queryset(model)
    if db_alias:
        queryset = queryset.using(db_alias)
    for field_name in model._meta.get_all_field_names():
        field = None
        try:
            field = model._meta.get_field(field_name)
            db_column = field.db_column
        # Django creates reverse ForeignKey relationship fields that may not have a database column in this table
        # This happens if you make existing fields/columns in other tables a ForeignKey referencing this table
        except FieldDoesNotExist:
            db_column = None
        if not field:
            if verbosity > 0:
                print "WARNING: Skipped 'phantom' field named '%s'. This is likely because of a ForeignKey relationship elsewhere back to this model (%r). No field found in the model '%s' for database '%s'." % (field_name, model, model.__name__, db_alias)
            continue
        # Fall back to the field name (exact, lower, upper) when the field
        # declares no explicit db_column.
        if not db_column:
            if field.name in model_meta:
                db_column = field.name
            elif field.name.lower() in model_meta:
                db_column = field.name.lower()
            elif field.name.upper() in model_meta:
                db_column = field.name.upper()
        if not db_column:
            if verbosity > 0:
                print "WARNING: Skipped field named '%s'. No column found in the database.table '%s.%s'." % (field.name, db_alias, model.__name__)
            continue
        # Optional per-column filtering: each filter is a predicate callable
        # or an exact column name to accept.
        if column_name_filters:
            if not any(((callable(cnf) and cnf(db_column)) or (db_column == cnf)) for cnf in column_name_filters):
                if verbosity > 0:
                    print "WARNING: Skipped field named '%s' for table '%s.%s' because it didn't match any filters: %r." % (field.name, db_alias, model.__name__, column_name_filters)
                continue
        # Skip Django's implicit `id` AutoField when the DB itself reports
        # no primary key for that column.
        if (field.name == 'id' and isinstance(field, models.fields.AutoField)
                and field.primary_key and (not model_meta[db_column]['primary_key'])):
            print "WARNING: Skipped field named '%s' for table '%s.%s' because it is an AutoField and no primary_key is defined for this table." % (field.name, db_alias, model.__name__)
            continue
        model_meta[db_column] = augment_field_meta(field, queryset, model_meta[db_column], count=count, verbosity=verbosity)
        if verbosity > 1:
            print '%s (%s of type %s) has %s / %s (%3.1f%%) distinct values between %s and %s, excluding %s nulls.' % (field.name, db_column,
                                                                                             model_meta[db_column]['type'],
                                                                                             model_meta[db_column]['num_distinct'],
                                                                                             count,
                                                                                             100. * (model_meta[db_column]['num_distinct'] or 0) / (count or 1),
                                                                                             repr(model_meta[db_column]['min']),
                                                                                             repr(model_meta[db_column]['max']),
                                                                                             model_meta[db_column]['num_null'])
    return model_meta
def augment_field_meta(field, queryset, field_properties, verbosity=0, count=0):
    """Return a dict of statistical properties (metadata) for a database column (model field)
    Strings are UTF-8 encoded (UTF-16 or invalid UTF-8 characters are ignored)
    Resulting dictionary is json-serializable using the pug.nlp.db.RobustEncoder class.
    {
        'num_distinct':   # count of distinct (different) discrete values within the column
        'min':   # minimum value
        'max':   # maximum value
        'num_null':   # count of the Null or None values in the column
        'type':   # database column type
    }
    TODO:
      1. count the number of values that are strings that could be converted to
         a. integers
         b. floats
         c. dates / datetimes
         d. booleans / nullbooleans
         e. other ordinal, categorical, or quantitative types
      2. count the number of null values
         a. null/None
         b. blank
         c. whitespace or other strings signifying null ('NULL', 'None', 'N/A', 'NaN', 'Not provided')
    """
    if settings.DEBUG and verbosity > 3:
        print 'Augmenting field meta data for %r...' % field
    # Calculate the fraction of values in a column that are distinct (unique).
    # For columns that aren't populated with 100% distinct values, the fraction may help identify columns that are part of a "unique-together" compound key
    # Necessary constraint for col1 and col2 to be compound key: col1_uniqueness + col2_uniqueness >= 1.0 (100%)
    # TODO: check for other clues about primary_keyness besides just uniqueness
    # -1 marks "not computed" (e.g. uncountable type or empty table)
    field_properties['num_distinct'] = -1
    field_properties['num_null'] = -1
    field_properties['fraction_distinct'] = -1
    typ = field_properties.get('type')
    if typ and typ not in types_not_countable and count:
        try:
            field_properties['num_distinct'] = queryset.values(field.name).distinct().count()
            field_properties['num_null'] = queryset.filter(**{'%s__isnull' % field.name: True}).count()
            field_properties['fraction_distinct'] = float(field_properties['num_distinct']) / (count or 1)
        except DatabaseError as e:
            if verbosity > 0:
                print_exc()
                print "DatabaseError: Skipped count of values in field named '%s' (%s) because of %s." % (field.name, repr(field.db_column), e)
            transaction.rollback()
        # Top-10 value histogram, only for columns that are neither constant
        # nor (nearly) all-distinct.
        try:
            if field_properties['num_distinct'] > 1 and (0 < field_properties['fraction_distinct'] < 0.999):
                # this will not work until pyodbc is updated
                # May be related to django-pyodbc incompatability with django 1.6
                # FIXME: use the working query for values.distinct.count and sort that dict and then query the top 10 of those individually
                field_properties['most_frequent'] = [(v, c) for (v,c) in
                                                     queryset.distinct().values(field.name).annotate(field_value_count=models.Count(field.name))
                                                     .extra(order_by=['-field_value_count']).values_list(field.name, 'field_value_count')
                                                     [:min(field_properties['num_distinct'], 10)]
                                                     ]
        except (StandardError, FieldError, DatabaseError) as e:
            if verbosity > 0:
                print "Warning: Failed to calculate the Top-10 histogram for field named '%s' (%s) because of %s." % (field.name, repr(field.db_column), e)
                if verbosity > 2:
                    print_exc()
    field_properties['max'] = None
    field_properties['min'] = None
    field_properties['longest'] = None
    field_properties['shortest'] = None
    # check field_properties['num_null'] for all Null first?
    if count and typ and typ not in types_not_aggregatable:
        # Roll back any transaction aborted by the statistics queries above
        transaction.rollback()
        try:
            field_properties['max'] = db.clean_utf8(queryset.aggregate(max_value=models.Max(field.name))['max_value'])
            field_properties['min'] = db.clean_utf8(queryset.aggregate(min_value=models.Min(field.name))['min_value'])
        except ValueError as e:
            if verbosity > 0:
                print_exc()
                print "ValueError (perhaps UnicodeDecodeError?): Skipped max/min calculations for field named '%s' (%s) because of %s." % (field.name, repr(field.db_column), e)
            transaction.rollback()
        except DatabaseError, e:
            if verbosity > 0:
                print_exc()
                print "DatabaseError: Skipped max/min calculations for field named '%s' (%s) because of %s." % (field.name, repr(field.db_column), e)
            transaction.rollback()
        # validate values that might be invalid strings do to db encoding/decoding errors (make sure they are UTF-8
        for k in ('min', 'max'):
            db.clean_utf8(field_properties.get(k))
        length_name = field.name + '___' + 'bytelength'
        # NOTE(review): select_params interpolates field.name as a quoted SQL
        # *value*, not an identifier — this likely computes LENGTH('name')
        # as a constant rather than the column's byte length; confirm.
        qs = queryset.extra(select={length_name: "LENGTH(%s)"}, select_params=(field.name,)).order_by(length_name)
        if qs.exists():
            # first() and last() aren't possible in Django 1.5
            field_properties['shortest'] = db.clean_utf8(getattr(qs.all()[0], length_name, None))
            field_properties['longest'] = db.clean_utf8(getattr(qs.order_by('-'+length_name).all()[0], length_name, None))
    return field_properties
def index_with_dupes(values_list, unique_together=2, model_number_i=0, serial_number_i=1, verbosity=1):
'''Create dict from values_list with first N values as a compound key.
Default N (number of columns assumbed to be "unique_together") is 2.
>>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]})
True
'''
try:
N = values_list.count()
except:
N = len(values_list)
if verbosity > 0:
print 'Indexing %d values_lists in a queryset or a sequence of Django model instances (database table rows).' % N
index, dupes = {}, {}
pbar = None
if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
widgets = [pb.Counter(), '%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
rownum = 0
for row in values_list:
normalized_key = [str(row[model_number_i]).strip(), str(row[serial_number_i]).strip()]
normalized_key += [i for i in range(unique_together) if i not in (serial_number_i, model_number_i)]
normalized_key = tuple(normalized_key)
if normalized_key in index:
# need to add the first nondupe before we add the dupes to the list
if normalized_key not in dupes:
dupes[normalized_key] = [index[normalized_key]]
dupes[normalized_key] = dupes[normalized_key] + [row]
if verbosity > 2:
print 'Duplicate "unique_together" tuple found. Here are all the rows that match this key:'
print dupes[normalized_key]
else:
index[normalized_key] = row
if pbar:
pbar.update(rownum)
rownum += 1
if pbar:
pbar.finish()
if verbosity > 0:
print 'Found %d duplicate model-serial pairs in the %d records or %g%%' % (len(dupes), len(index), len(dupes)*100./(len(index) or 1.))
return index, dupes
def index_model_field(model, field, value_field='pk', key_formatter=str.strip, value_formatter=str.strip, batch_len=10000, limit=10000000, verbosity=1):
    '''Create dict {obj.<field>: obj.pk} for all field_values in a model or queryset.
    Returns (index, dupes): `index` maps each formatted field value to the
    first formatted entry value seen; `dupes` maps repeated field values to
    the later entry values (first occurrence stays in `index` only).
    NOTE(review): `batch_len` is accepted but never used in this function.
    '''
    try:
        # Accept either a model class (use its default manager) ...
        qs = model.objects
    except:
        # ... or an already-built queryset/manager.
        qs = model
    N = qs.count()
    if verbosity > 0:
        print 'Indexing %d rows to aid in finding %s.%s values using %s.%s.' % (N, qs.model.__name__, value_field, qs.model.__name__, field)
    index, dupes, rownum = {}, {}, 0
    pbar, rownum = None, 0
    # Only show a progress bar for large inputs; threshold shrinks with verbosity.
    if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
        widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
        pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
    # to determine the type of the field value and decide whether to strip() or normalize in any way
    #obj0 = qs.filter(**{field + '__isnull': False}).all()[0]
    for obj in qs.all():
        field_value = getattr(obj, field)
        try:
            # Formatter may fail on non-string values; keep the raw value then.
            field_value = key_formatter(field_value)
        except:
            pass
        if value_field:
            entry_value = getattr(obj, value_field)
        else:
            # No value_field: index whole model instances.
            entry_value = obj
        try:
            entry_value = value_formatter(entry_value)
        except:
            pass
        if field_value in index:
            # Repeated key: record the extra entry value as a duplicate.
            dupes[field_value] = dupes.get(field_value, []) + [entry_value]
        else:
            index[field_value] = entry_value
        rownum += 1
        if rownum >= limit:
            break
        if pbar:
            pbar.update(rownum)
    if pbar:
        pbar.finish()
    if verbosity > 0:
        print 'Found %d duplicate %s values among the %d records or %g%%' % (len(dupes), field, len(index), len(dupes)*100./(len(index) or 1.))
    return index, dupes
def index_model_field_batches(model_or_queryset, key_fields=['model_number', 'serial_number'], value_fields=['pk'],
                              key_formatter=lambda x: str.lstrip(str.strip(str(x or '')), '0'),
                              value_formatter=lambda x: str.strip(str(x)), batch_len=10000,
                              limit=100000000, verbosity=1):
    '''Like index_model_field except uses 50x less memory and 10x more processing cycles
    Returns 2 dicts where both the keys and values are tuples:
    target_index = {(<key_fields[0]>, <key_fields[1]>, ...): (<value_fields[0]>,)} for all distinct model-serial pairs in the Sales queryset
    target_dupes = {(<key_fields[0]>, <key_fields[1]>, ...): [(<value_fields[1]>,), (<value_fields[2]>,), ...]} with all the duplicates except the first pk already listed above
    '''
    qs = djdb.get_queryset(model_or_queryset)
    N = qs.count()
    if verbosity > 0:
        print 'Indexing %d rows (database records) to aid in finding record %r values using the field %r.' % (N, value_fields, key_fields)
    index, dupes, rownum = {}, {}, 0
    pbar, rownum = None, 0
    # Only show a progress bar for large inputs; threshold shrinks with verbosity.
    if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
        widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
        pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
    # to determine the type of the field value and decide whether to strip() or normalize in any way
    #obj0 = qs.filter(**{field + '__isnull': False}).all()[0]
    value_fields = util.listify(value_fields)
    key_fields = util.listify(key_fields)
    # Stream batches so only batch_len instances are held in memory at once.
    for batch in djdb.generate_queryset_batches(qs, batch_len=batch_len, verbosity=verbosity):
        for obj in batch:
            # print obj
            # normalize the key
            keys = []
            for kf in key_fields:
                k = getattr(obj, kf)
                keys += [key_formatter(k or '')]
            values = []
            keys = tuple(keys)
            for vf in value_fields:
                v = getattr(obj, vf)
                values += [value_formatter(v or '')]
            values = tuple(values)
            if keys in index:
                # Repeated key: record the extra value tuple as a duplicate.
                dupes[keys] = dupes.get(keys, []) + [values]
            else:
                index[keys] = values
            # print rownum / float(N)
            if pbar:
                pbar.update(rownum)
            rownum += 1
            if rownum >= limit:
                # NOTE(review): this break only exits the inner (batch) loop;
                # the outer batch loop continues -- confirm whether `limit`
                # should stop iteration over all batches.
                break
    if pbar:
        pbar.finish()
    if verbosity > 0:
        print 'Found %d duplicate %s values among the %d records or %g%%' % (len(dupes), key_fields, len(index), len(dupes)*100./(len(index) or 1.))
    return index, dupes
def find_index(model_meta, weights=None, verbosity=0):
    """Return index metadata for the most index-worthy field of a model.

    If a field is already marked primary_key, returns a 1-item dict
    {field_name: {'primary_key': True, 'unique': bool}}. Otherwise scores
    every field using `weights` and returns a 3-tuple:
    (field_name, {'primary_key': True, 'unique': bool}, score).
    NOTE(review): the two return shapes (dict vs 3-tuple) are inconsistent;
    callers that unpack 3 values will fail on the primary-key path.
    """
    weights = weights or find_index.default_weights
    # Total row count, used to judge whether distinct values imply uniqueness.
    N = model_meta['Meta'].get('count', 0)
    for field_name, field_meta in model_meta.iteritems():
        if field_name == 'Meta':
            continue
        pkfield = field_meta.get('primary_key')
        if pkfield:
            if verbosity > 1:
                print pkfield
            # TODO: Allow more than one index per model/table
            return {
                field_name: {
                    'primary_key': True,
                    # Unique if flagged, or if (for non-trivial N) at most one
                    # null and as many distinct values as rows.
                    'unique': field_meta.get('unique') or (
                        N >= 3 and field_meta.get('num_null') <= 1
                        and field_meta.get('num_distinct') == N),
                }}
    score_names = []
    # NOTE(review): unlike the loop above, this one does not skip 'Meta'.
    for field_name, field_meta in model_meta.iteritems():
        score = 0
        for feature, weight in weights:
            # for categorical features (strings), need to look for a particular value
            value = field_meta.get(feature)
            if isinstance(weight, tuple):
                # NOTE(review): `value in (float, int)` tests identity against
                # the float/int *classes*, and `weight * value` would multiply
                # a tuple -- this branch looks broken; confirm intent
                # (probably meant isinstance(value, (float, int))).
                if value is not None and value in (float, int):
                    score += weight * value
                if callable(weight[1]):
                    # (coefficient, callable) pair: apply the callable first.
                    score += weight[0] * weight[1](field_meta.get(feature))
                else:
                    # (coefficient, expected_value) pair: score on equality.
                    score += weight[0] * (field_meta.get(feature) == weight[1])
            else:
                feature_value = field_meta.get(feature)
                if feature_value is not None:
                    score += weight * field_meta.get(feature)
        score_names += [(score, field_name)]
    # Highest (score, name) tuple wins; ties broken by field name.
    max_name = max(score_names)
    field_meta = model_meta[max_name[1]]
    return (
        max_name[1],
        {
            'primary_key': True,
            'unique': field_meta.get('unique') or (
                N >= 3
                and field_meta.get('num_null') <= 1
                and field_meta.get('num_distinct') == N),
        },
        max_name[0],
    )
# Default scoring weights: (feature, coefficient) or (feature, (coeff, spec)).
find_index.default_weights = (('num_distinct', (1e-3, 'normalize')), ('unique', 1.), ('num_null', (-1e-3, 'normalize')), ('fraction_null', -2.),
    ('type', (.3, 'numeric')), ('type', (.2, 'char')), ('type',(-.3, 'text')),
    )
def meta_to_indexes(meta, table_name=None, model_name=None):
    """Find all the indexes (primary keys) based on the meta data.

    Returns a list of ('ModelName.field_name', field_infodict, score)
    3-tuples, one per model matching the optional table_name/model_name
    filter (no filter means all models).
    NOTE(review): find_index returns a 1-item dict (not a 3-tuple) when a
    primary_key field exists, which would make the unpacking below fail --
    confirm the intended contract of find_index.
    """
    # FIX: removed the dead `indexes, pk_field = {}, None` assignment that
    # was immediately overwritten on the next line.
    indexes = []
    for meta_model_name, model_meta in meta.iteritems():
        # When a filter is given, skip models that match neither the table
        # name nor the model name.
        if (table_name or model_name) and not (table_name == model_meta['Meta'].get('db_table', '') or model_name == meta_model_name):
            continue
        field_name, field_infodict, score = find_index(model_meta)
        indexes.append(('%s.%s' % (meta_model_name, field_name), field_infodict, score))
    return indexes
def get_relations(cursor, table_name, app=DEFAULT_APP_NAME, db_alias=None):
    """Placeholder: should find DB fields related to columns in other tables.
    Always raises NotImplementedError."""
    # meta = get_db_meta(app=app, db_alias=db_alias, table=table_name, verbosity=0)
    raise NotImplementedError("Not implemented: Find DB fields that appear to be related to fields elsewhere in the same DB (due to being a subset of a unique=True column in another table)")
def get_indexes(cursor, table_name, app=DEFAULT_APP_NAME, db_alias=None, verbosity=0):
    """Placeholder: should find columns usable as an index (unique values).
    Gathers (and optionally prints) DB meta, then raises NotImplementedError."""
    meta = get_db_meta(app=app, db_alias=db_alias, table=table_name, verbosity=0)
    if verbosity > 1:
        print meta
    raise NotImplementedError("Not implemented: Find columns in a database table that appear to be usable as an index (satisfy unique=True constraint)")
def try_convert(value, datetime_to_ms=False, precise=False):
    """Convert a str into more useful python type (datetime, float, int, bool), if possible
    Some precision may be lost (e.g. Decimal converted to a float)
    >>> try_convert('false')
    False
    >>> try_convert('123456789.123456')
    123456789.123456
    >>> try_convert('1234')
    1234
    >>> try_convert(1234)
    1234
    >>> try_convert(['1234'])
    ['1234']
    >>> try_convert('12345678901234567890123456789012345678901234567890', precise=True)
    12345678901234567890123456789012345678901234567890L
    >>> try_convert('12345678901234567890123456789012345678901234567890.1', precise=True)
    Decimal('12345678901234567890123456789012345678901234567890.1')
    """
    # Non-strings are returned unchanged (see the int and list doctests).
    if not isinstance(value, basestring):
        return value
    # Recognized boolean/null spellings take precedence over numeric parsing.
    if value in db.YES_VALUES or value in db.TRUE_VALUES:
        return True
    elif value in db.NO_VALUES or value in db.FALSE_VALUES:
        return False
    elif value in db.NULL_VALUES:
        return None
    try:
        if not precise:
            # Fast path: int first (so '1234' stays int), then float.
            try:
                return int(value)
            except:
                try:
                    return float(value)
                except:
                    pass
        else:
            dec, i, f = None, None, None
            try:
                dec = Decimal(value)
            except:
                # Not numeric at all -- retry on the imprecise path
                # (which may still parse it as a datetime below).
                return try_convert(value, precise=False)
            try:
                i = int(value)
            except:
                try:
                    f = float(value)
                except:
                    pass
            # Prefer the simplest type that preserves the exact value:
            # int, then float, then Decimal.
            if dec is not None:
                if dec == i:
                    return i
                elif dec == f:
                    return f
                return dec
    except:
        pass
    # Last resort: parse as a date/time, accepting only sane years.
    try:
        dt = dateutil.parse(value)
        if dt and isinstance(dt, datetime.datetime) and (3000 >= dt.year >= 1900):
            if datetime_to_ms:
                # Caller wants milliseconds since the epoch instead.
                return db.datetime_in_milliseconds(dt)
            return dt
    except:
        pass
    # Nothing matched: hand back the original string.
    return value
def make_serializable(data, mutable=True, key_stringifier=lambda x:x, simplify_midnight_datetime=True):
    r"""Make sure the data structure is json serializable (json.dumps-able), all they way down to scalars in nested structures.
    If mutable=False then return tuples for all iterables, except basestrings (strs),
    so that they can be used as keys in a Mapping (dict).
    >>> from collections import OrderedDict
    >>> from decimal import Decimal
    >>> data = {'x': Decimal('01.234567891113151719'), 'X': [{('y', 'z'): {'q': 'A\xFFB'}}, 'ender'] }
    >>> make_serializable(OrderedDict(data)) == {'X': [{('y', 'z'): {'q': 'A\xc3\xbfB'}}, 'ender'], 'x': 1.2345678911131517}
    True
    """
    if isinstance(data, (datetime.datetime, datetime.date, datetime.time)):
        if isinstance(data, datetime.datetime):
            # BUG FIX: attributes were misspelled (`miniute`, `seconds`),
            # raising AttributeError for every datetime input.
            if not any((data.hour, data.minute, data.second)):
                # Midnight datetimes simplify to a plain date.
                return datetime.date(data.year, data.month, data.day)
            elif data.year == data.month == data.day == 1:
                # The 1-1-1 sentinel date means only the time-of-day matters.
                # NOTE(review): original compared `data.seconds` here (typo);
                # `data.day` is the most plausible intent -- confirm.
                return datetime.time(data.hour, data.minute, data.second)
        return data
    elif isinstance(data, Model):
        # FIX: removed an unreachable copy of the datetime simplification
        # (a Model instance is never a datetime). Behavior unchanged:
        # Model instances are returned as-is.
        return data
    elif isinstance(data, Mapping):
        # Keys must be immutable (mutable=False); values follow the caller's
        # mutability preference.
        mapping = tuple((make_serializable(k, mutable=False, key_stringifier=key_stringifier), make_serializable(v, mutable=mutable)) for (k, v) in data.iteritems())
        if mutable:
            return dict(mapping)
        return mapping
    elif hasattr(data, '__iter__'):
        if mutable:
            return list(make_serializable(v, mutable=mutable) for v in data)
        else:
            # Immutable request: tuple, optionally stringified for use as a key.
            return key_stringifier(tuple(make_serializable(v, mutable=mutable) for v in data))
    elif isinstance(data, (float, Decimal)):
        # Decimal is narrowed to float (precision may be lost; see docstring).
        return float(data)
    elif isinstance(data, basestring):
        # Ensure valid UTF-8 before attempting coercions.
        data = db.clean_utf8(data)
        try:
            return int(data)
        except:
            try:
                return float(data)
            except:
                try:
                    # see if can be coerced into datetime by first coercing to a string
                    return make_serializable(dateutil.parse(unicode(data)))
                except:
                    try:
                        # see if can be coerced into a dict (e.g. Dajngo Model or custom user module or class)
                        return make_serializable(data.__dict__)
                    except:
                        # stringify it and give up
                        return unicode(data)
def convert_loaded_json(js):
    """Convert strings loaded as part of a json file/string to native python types
    convert_loaded_json({'x': '123'})
    {'x': 123}
    convert_loaded_json([{'x': '123.'}, {'x': 'Jan 28, 2014'}])
    [{'x': 123}, datetime.datetime(2014, 1, 18)]
    """
    # Scalars are delegated straight to try_convert.
    if not isinstance(js, (Mapping, tuple, list)):
        return try_convert(js)
    try:
        # Mappings: rebuild the same type from converted (key, value) pairs.
        return type(js)(convert_loaded_json(item) for item in js.iteritems())
    except:
        try:
            # Sequences: rebuild the same type from converted elements.
            return type(js)(convert_loaded_json(item) for item in iter(js))
        except:
            # Construction failed (e.g. tuple constructor quirks): treat as scalar.
            return try_convert(js)
def models_with_unique_column(meta, exclude_single_pk=True, exclude_multi_pk=True):
    """Return a dict {model_name: [field_name, ...]} for models that have at
    least 1 field with all-distinct values (could be used as primary_key).

    exclude_single_pk: skip models that already declare a primary key.
    exclude_multi_pk: only report models with exactly one candidate field
    (when False, any nonzero number of candidates is reported).
    """
    models_with_potential_pk = {}
    # FIX: removed the dead `fields_distinct = {}` initializer; the variable
    # was unconditionally rebound inside the loop.
    for model_name, model_fields in meta.iteritems():
        if exclude_single_pk and model_fields['Meta']['primary_key']:
            continue
        fields_distinct = []
        for field_name, field in model_fields.iteritems():
            # BUG FIX: was `field_name is 'Meta'` -- identity comparison on a
            # string is implementation-dependent; use equality.
            if field_name == 'Meta':
                continue
            # A field where every value is distinct is a primary-key candidate.
            if float(field.get('fraction_distinct', 0)) == 1.:
                fields_distinct += [field_name]
        if (not exclude_multi_pk and fields_distinct) or len(fields_distinct) == 1:
            models_with_potential_pk[model_name] = fields_distinct
    return models_with_potential_pk
def get_cursor_table_names(cursor):
    """Return the names of all tables visible to the given DB cursor.
    NOTE(review): assumes the table name is the second-to-last column of
    information_schema.tables -- verify for the target database backend.
    """
    return [row[-2] for row in cursor.execute("""SELECT * FROM information_schema.tables""").fetchall()]
def print_cursor_table_names(cursor=None):
if isinstance(cursor, basestring):
cursor = connections[cursor].cursor()
if not cursor:
cursor = connections['default']
for table_name in get_cursor_table_names(cursor):
print table_name
class QueryTimer(object):
    r"""Time the wall-clock duration and count of SQL queries on a connection.
    Based on https://github.com/jfalkner/Efficient-Django-QuerySet-Use
    >>> from pug.dj.miner.models import Database
    >>> qt = QueryTimer()
    >>> print 'If this fails, you may need to `manage.py syncdb`: %r' % list(Database.objects.values()[:1]) # doctest: +ELLIPSIS
    If this fails, you may need to `manage.py syncdb`:...
    >>> qt.stop() # doctest: +ELLIPSIS
    QueryTimer(time=0.0..., num_queries=...)
    """
    def __init__(self, time=None, num_queries=None, sql=None, conn=None):
        # Accept a connection alias string or an actual connection object.
        if isinstance(conn, basestring):
            conn = connections[conn]
        self.conn = conn or connection
        self.time, self.num_queries = time, num_queries
        self.start_time, self.start_queries, self.queries = None, None, None
        self.sql = sql or []
        # Timing begins immediately upon construction.
        self.start()
    def start(self):
        # Reset state and record the baseline query count for this connection.
        self.queries = []
        self.start_time = datetime.datetime.now()
        self.start_queries = len(self.conn.queries)
    def stop(self):
        # Capture elapsed time and the queries issued since start(), then
        # print a summary via __repr__.
        self.time = (datetime.datetime.now() - self.start_time).total_seconds()
        self.queries = self.conn.queries[self.start_queries:]
        self.num_queries = len(self.queries)
        print self
    def format_sql(self):
        # Lazily stop the timer if the caller never called stop().
        if self.time is None or self.queries is None:
            self.stop()
        # Re-format when there are fresh queries or no cached SQL yet.
        if self.queries or not self.sql:
            self.sql = []
            for query in self.queries:
                self.sql += [sqlparse.format(query['sql'], reindent=True, keyword_case='upper')]
        return self.sql
    def __repr__(self):
        return '%s(time=%s, num_queries=%s)' % (self.__class__.__name__, self.time, self.num_queries)
# TODO: make this a django filter query of a database rather than a generator
def count_unique(table, field=-1):
    """Use the Django ORM or collections.Counter to count unique values of a field in a table

    `table` is one of:
      1. An iterable of Django model instances for a database table (e.g. a Django queryset)
      2. An iterable of dicts or lists with elements accessed by row[field] where field can be an integer or string
      3. An iterable of objects or namedtuples with elements accessed by `row.field`
    `field` can be any immutable object (the key or index in a row of the table that access the value to be counted)

    Returns a dict-like mapping {value: occurrence_count}, or None if every
    access strategy fails.
    """
    from collections import Counter
    # try/except only happens once, and fastest route (straight to db) tried first
    try:
        ans = {}
        for row in table.distinct().values(field).annotate(field_value_count=models.Count(field)):
            ans[row[field]] = row['field_value_count']
        return ans
    except:
        try:
            # Rows indexable by key/position (dicts, lists, tuples).
            return Counter(row[field] for row in table)
        except:
            try:
                # Dict-likes where `field` may be absent in some rows.
                return Counter(row.get(field, None) for row in table)
            except:
                try:
                    # BUG FIX: was `row.getattr(field, None)` -- objects have
                    # no `.getattr` method; use the builtin getattr() for
                    # attribute-style access (objects/namedtuples).
                    return Counter(getattr(row, field, None) for row in table)
                except:
                    pass
def get_field_names(model, types=[models.TextField]):
    """Return the names of the model's fields whose exact type is in `types`.
    NOTE(review): `types` is a mutable default argument -- safe only because
    it is never mutated here. Matching uses exact `type(...) in types`, so
    subclasses of the listed field types are NOT included.
    """
    names = []
    for name in model.get_all_field_names():
        if type(model.get_field(name)) in types:
            names += [name]
    return names
| |
"Various utility functions."
import datetime
import email.mime.text
import hashlib
import logging
import os
import os.path
import smtplib
import string
import uuid
import unicodedata
import couchdb2
import yaml
from publications import constants
from publications import settings
def load_settings(filepath=None, log=True):
    """Load the settings. The file path first specified is used:
    1) The argument to this procedure (possibly from a command line argument).
    2) The environment variable PUBLICATIONS_SETTINGS.
    3) The file '../site/settings.yaml' relative to this directory.
    If 'log' is True, activate logging according to DEBUG settings.
    Raise IOError if settings file could not be read.
    Raise KeyError if a settings variable is missing.
    Raise ValueError if a settings variable value is invalid.
    """
    site_dir = settings["SITE_DIR"]
    if not os.path.exists(site_dir):
        raise IOError(f"The required site directory '{site_dir}' does not exist.")
    if not os.path.isdir(site_dir):
        raise IOError(f"The site directory path '{site_dir}' is not a directory.")
    # Find and read the settings file, updating the defaults.
    if not filepath:
        try:
            # BUG FIX: this was assigned to an unused variable 'filename',
            # leaving filepath as None (so open(None) failed) whenever the
            # PUBLICATIONS_SETTINGS environment variable was set.
            filepath = os.environ["PUBLICATIONS_SETTINGS"]
        except KeyError:
            filepath = os.path.join(site_dir, "settings.yaml")
    with open(filepath) as infile:
        settings.update(yaml.safe_load(infile))
    settings["SETTINGS_FILE"] = filepath
    # Setup logging.
    if settings.get("LOGGING_DEBUG"):
        kwargs = dict(level=logging.DEBUG)
    else:
        kwargs = dict(level=logging.INFO)
    try:
        kwargs["format"] = settings["LOGGING_FORMAT"]
    except KeyError:
        pass
    try:
        kwargs["filename"] = settings["LOGGING_FILEPATH"]
    except KeyError:
        pass
    else:
        # A file mode only makes sense when a log file has been configured.
        try:
            kwargs["filemode"] = settings["LOGGING_FILEMODE"]
        except KeyError:
            pass
    settings["LOG"] = log
    if log:
        logging.basicConfig(**kwargs)
        logging.info(f"Publications version {constants.VERSION}")
        logging.info(f"ROOT: {constants.ROOT}")
        logging.info(f"SITE_DIR: {settings['SITE_DIR']}")
        logging.info(f"settings: {settings['SETTINGS_FILE']}")
        logging.info(f"logging debug: {settings['LOGGING_DEBUG']}")
        logging.info(f"tornado debug: {settings['TORNADO_DEBUG']}")
    # Check some settings.
    for key in ["BASE_URL", "PORT", "DATABASE_SERVER", "DATABASE_NAME"]:
        if key not in settings:
            raise KeyError(f"No settings['{key}'] item.")
        if not settings[key]:
            raise ValueError(f"Settings['{key}'] has invalid value.")
    if len(settings.get("COOKIE_SECRET") or "") < 10:
        raise ValueError("settings['COOKIE_SECRET'] not set, or too short.")
    if len(settings.get("PASSWORD_SALT") or "") < 10:
        raise ValueError("Settings['PASSWORD_SALT'] not set, or too short.")
    for key in ["PUBMED_DELAY", "PUBMED_TIMEOUT", "CROSSREF_DELAY", "CROSSREF_TIMEOUT"]:
        if not isinstance(settings[key], (int, float)) or settings[key] <= 0.0:
            raise ValueError(f"Invalid '{key}' value: must be positive number.")
    # Set up the xref templates URLs.
    settings["XREF_TEMPLATE_URLS"] = NocaseDict(settings["XREF_TEMPLATE_URLS"])
    settings["XREF_TEMPLATE_URLS"]["URL"] = "%s"
def get_dbserver():
    "Return the CouchDB2 handle for the CouchDB server."
    kwargs = {"href": settings["DATABASE_SERVER"]}
    account = settings.get("DATABASE_ACCOUNT")
    password = settings.get("DATABASE_PASSWORD")
    # Credentials are passed only when both pieces are configured.
    if account and password:
        kwargs["username"] = account
        kwargs["password"] = password
    return couchdb2.Server(**kwargs)
def get_db():
    """Return the CouchDB2 handle for the CouchDB database.
    The named database must exist.
    """
    server = get_dbserver()
    name = settings["DATABASE_NAME"]
    try:
        database = server[name]
    except couchdb2.NotFoundError:
        # Translate the driver-specific error into a plain KeyError.
        raise KeyError(f"CouchDB database '{name}' does not exist.")
    return database
def init_db():
    "Initialize the database by loading the design documents. Return the database."
    # NOTE(review): presumably imported here (not at module top) to avoid
    # circular imports -- confirm.
    import publications.account
    import publications.blacklist
    import publications.journal
    import publications.label
    import publications.log
    import publications.publication
    import publications.researcher
    db = get_db()
    # Each subsystem installs its own CouchDB design documents into the db.
    publications.account.init(db)
    publications.blacklist.init(db)
    publications.journal.init(db)
    publications.label.init(db)
    publications.log.init(db)
    publications.publication.init(db)
    publications.researcher.init(db)
    return db
def get_doc(db, designname, viewname, key):
    """Get the document with the given key from the given design view.
    Raise KeyError if not found (or if the key is ambiguous).
    """
    rows = list(db.view(designname, viewname, key=key, include_docs=True, reduce=False))
    # Exactly one row must match; zero or several is an error either way.
    if len(rows) != 1:
        raise KeyError(f"{len(rows)} items found")
    return rows[0].doc
def get_docs(db, designname, viewname, key=None, last=None, **kwargs):
    """Get the list of documents using the given design view and
    the given key or interval.
    """
    if key is not None:
        if last is None:
            # Single-key lookup.
            kwargs["key"] = key
        else:
            # Interval lookup from key to last (inclusive per CouchDB rules).
            kwargs["startkey"] = key
            kwargs["endkey"] = last
    view = db.view(designname, viewname, include_docs=True, reduce=False, **kwargs)
    return [row.doc for row in view]
def get_count(db, designname, viewname, key=None):
    "Get the reduce value for the named view and the given key; 0 when empty."
    kwargs = dict(reduce=True)
    if key is not None:
        kwargs["key"] = key
    rows = list(db.view(designname, viewname, **kwargs))
    # An empty reduce result means there is nothing to count.
    if not rows:
        return 0
    return rows[0].value
def get_account(db, email):
    """Get the account identified by the email address.
    Raise KeyError if not found.
    """
    # Email addresses are stored normalized: trimmed and lower-cased.
    normalized = email.strip().lower()
    try:
        return get_doc(db, "account", "email", normalized)
    except KeyError:
        raise KeyError(f"no such account '{email}'")
def get_publication(db, identifier):
    """Get the publication given its IUID, DOI or PMID.
    Raise KeyError if not found.
    """
    if not identifier:
        raise KeyError
    identifier = identifier.lower()
    try:
        # Fastest path: direct document lookup by IUID.
        doc = db[identifier]
    except couchdb2.NotFoundError:
        doc = None
        # Fall back to the DOI and PMID views, in that order.
        for viewname in ["doi", "pmid"]:
            try:
                doc = get_doc(db, "publication", viewname, identifier)
                break
            except KeyError:
                pass
        else:
            # for/else: no view produced a document.
            raise KeyError(f"no such publication '{identifier}'.")
    return doc
def get_researcher(db, identifier):
    """Get the researcher entity given its IUID or ORCID.
    Raise KeyError if not found.
    """
    if not identifier:
        raise KeyError
    try:
        # Fastest path: direct document lookup by (lower-cased) IUID.
        doc = db[identifier.lower()]
    except couchdb2.NotFoundError:
        # Fall back to the ORCID view, using the identifier as given.
        try:
            doc = get_doc(db, "researcher", "orcid", identifier)
        except KeyError:
            raise KeyError(f"no such researcher '{identifier}'.")
    return doc
def get_label(db, identifier):
    """Get the label document by its IUID or value.
    Raise KeyError if not found.
    """
    if not identifier:
        raise KeyError("no identifier provided")
    try:
        # Fastest path: direct document lookup by IUID.
        doc = db[identifier]
    except couchdb2.NotFoundError:
        # Fall back to lookup by the ASCII-normalized, lower-cased label value.
        identifier = to_ascii(identifier).lower()
        try:
            doc = get_doc(db, "label", "normalized_value", identifier)
        except KeyError:
            raise KeyError(f"no such label '{identifier}'")
    return doc
def get_blacklisted(db, identifier):
    """Get the blacklist document if the publication with
    the external identifier has been blacklisted; None otherwise.
    """
    if identifier:
        # Try both external-identifier views; first hit wins.
        for viewname in ("doi", "pmid"):
            try:
                return get_doc(db, "blacklist", viewname, identifier)
            except KeyError:
                pass
    return None
def get_iuid():
    "Return a unique instance identifier (32-character hex string)."
    identifier = uuid.uuid4()
    return identifier.hex
def hashed_password(password):
    "Return the password in hashed form."
    # Salt first, then the password itself, both UTF-8 encoded.
    digest = hashlib.sha256(settings["PASSWORD_SALT"].encode("utf-8"))
    digest.update(password.encode("utf-8"))
    return digest.hexdigest()
def check_password(password):
    """Check that the password is long and complex enough.
    Raise ValueError otherwise."""
    minimum = settings["MIN_PASSWORD_LENGTH"]
    if len(password) < minimum:
        raise ValueError(f"Password must be at least {minimum} characters.")
def timestamp(days=None):
    """Current date and time (UTC) in ISO format, with millisecond precision.
    Add the specified offset in days, if given.
    """
    now = datetime.datetime.utcnow()
    if days:
        now = now + datetime.timedelta(days=days)
    iso = now.isoformat()
    # Characters 0-16 are 'YYYY-MM-DDTHH:MM:'; re-render the seconds part
    # with exactly three decimals, e.g. '...T12:34:05.123Z'.
    seconds = float(iso[17:])
    return "%s%06.3f%s" % (iso[:17], seconds, "Z")
def epoch_to_iso(epoch):
    """Convert the given number of seconds since the epoch
    to date and time in ISO format.
    """
    # Accepts anything float() can parse (int, float or numeric string).
    return datetime.datetime.fromtimestamp(float(epoch)).isoformat() + "Z"
def today(days=None):
    """Current date (UTC) in ISO format.
    Add the specified offset in days, if given.
    """
    instant = datetime.datetime.utcnow()
    if days:
        instant += datetime.timedelta(days=days)
    # Keep only the date portion of the ISO datetime string.
    return instant.isoformat().split("T")[0]
def to_date(value):
    """Convert value to proper ISO format date 'YYYY-MM-DD'.
    Missing month or day parts are rendered as zeros, e.g. '1999-00-00'.
    Return today if value is None or empty.
    Raise ValueError if it cannot be interpreted.
    """
    if not value:
        return today()
    # FIX: removed the unused `result = []` local variable.
    parts = value.split("-")
    try:
        year = int(parts[0])
        try:
            month = int(parts[1])
            if month < 0:
                raise ValueError
            if month > 12:
                raise ValueError
        except IndexError:
            month = 0  # Month not given.
        try:
            day = int(parts[2])
            if day < 0:
                raise ValueError
            if day > 31:
                raise ValueError
        except IndexError:
            day = 0  # Day not given.
    except (TypeError, ValueError):
        raise ValueError(f"invalid date '{value}'")
    return "%s-%02i-%02i" % (year, month, day)
def years():
    "Return a list of years from the first year to the current."
    current_year = int(today().split("-")[0])
    return list(range(settings["FIRST_YEAR"], current_year + 1))
def to_ascii(value, alphanum=False):
    """Convert any non-ASCII character to its closest ASCII equivalent.
    'alphanum': retain only alphanumerical characters and whitespace.
    """
    if value is None:
        return ""
    # Decompose accented characters, then drop the combining marks.
    decomposed = unicodedata.normalize("NFKD", str(value))
    result = "".join(c for c in decomposed if not unicodedata.combining(c))
    if alphanum:
        keep = set(string.ascii_letters + string.digits + string.whitespace)
        result = "".join(c for c in result if c in keep)
    return result
def squish(value):
    "Remove all unnecessary white spaces."
    # str.split() with no argument never yields empty strings, so joining
    # the parts collapses every whitespace run to a single space.
    parts = value.split()
    return " ".join(parts)
def to_bool(value):
    """Convert the value into a boolean, interpreting various string values.
    Booleans pass through; falsy values yield False; otherwise the
    lower-cased string is matched against constants.TRUE/FALSE.
    Raise ValueError for unrecognized values.
    """
    if isinstance(value, bool):
        return value
    if not value:
        return False
    lowvalue = value.lower()
    if lowvalue in constants.TRUE:
        return True
    if lowvalue in constants.FALSE:
        return False
    # BUG FIX: the message was a plain string missing the f-prefix, so it
    # always showed the literal text '{value}'.
    raise ValueError(f"invalid boolean: '{value}'")
def strip_prefix(value):
    "Strip any prefix from the string value."
    stripped = value.strip()
    lowered = stripped.lower()
    # Prefix matching is case-insensitive; the returned value keeps its case.
    for prefix in settings["IDENTIFIER_PREFIXES"]:
        if lowered.startswith(prefix):
            return stripped[len(prefix):].strip()
    return stripped
def get_formatted_authors(authors, complete=False):
    """Get formatted list of authors; partial or complete list.
    When not complete and the list is long, keeps only the first
    NUMBER_FIRST_AUTHORS and last NUMBER_LAST_AUTHORS entries, with a
    None placeholder (rendered as '...') in between."""
    if (
        not complete
        and len(authors)
        > settings["NUMBER_FIRST_AUTHORS"] + settings["NUMBER_LAST_AUTHORS"]
    ):
        authors = (
            authors[: settings["NUMBER_FIRST_AUTHORS"]]
            + [None]
            + authors[-settings["NUMBER_LAST_AUTHORS"] :]
        )
    result = []
    for author in authors:
        if author:
            # 'family' may be None; 'initials' may be absent or None.
            name = "%s %s" % (
                " ".join((author["family"] or "").split()),
                author.get("initials") or "",
            )
            # Get rid of bizarre newlines in author names.
            result.append(" ".join(name.strip().split()))
        else:
            # The None placeholder marks elided middle authors.
            result.append("...")
    return ", ".join(result)
class DownloadParametersMixin:
    """Mixin for getting the parameters controlling the download output.
    To be inherited by a RequestHandler subclass.
    """
    def get_parameters(self):
        "Return the output parameters from the form arguments."
        # Boolean flags; absent arguments default to False.
        result = dict(
            single_label=to_bool(self.get_argument("single_label", False)),
            all_authors=to_bool(self.get_argument("all_authors", False)),
            issn=to_bool(self.get_argument("issn", False)),
            numbered=to_bool(self.get_argument("numbered", False)),
            doi_url=to_bool(self.get_argument("doi_url", False)),
            pmid_url=to_bool(self.get_argument("pmid_url", False)),
        )
        # maxline must be an int > 20; anything else becomes None (no limit).
        try:
            result["maxline"] = self.get_argument("maxline", None)
            if result["maxline"]:
                result["maxline"] = int(result["maxline"])
                if result["maxline"] <= 20:
                    raise ValueError
        except (ValueError, TypeError):
            result["maxline"] = None
        # Map the named delimiter choice to the actual character; unknown
        # names leave 'delimiter' unset.
        delimiter = self.get_argument("delimiter", "").lower()
        if delimiter == "comma":
            result["delimiter"] = ","
        elif delimiter == "semi-colon":
            result["delimiter"] = ";"
        elif delimiter == "tab":
            result["delimiter"] = "\t"
        encoding = self.get_argument("encoding", "").lower()
        if encoding:
            result["encoding"] = encoding
        return result
class EmailServer:
    "A connection to an email server for sending emails."
    def __init__(self):
        """Open the connection to the email server.
        Raise ValueError if no email server host has been defined.
        """
        try:
            host = settings["EMAIL"]["HOST"]
            if not host:
                raise ValueError
            # Sender address: site-wide override, or the EMAIL config sender.
            self.email = settings.get("SITE_EMAIL") or settings["EMAIL"]["SENDER"]
        except (KeyError, TypeError):
            raise ValueError("email server host is not properly defined")
        port = settings["EMAIL"].get("PORT") or 0
        if settings["EMAIL"].get("SSL"):
            self.server = smtplib.SMTP_SSL(host, port=port)
        else:
            self.server = smtplib.SMTP(host, port=port)
        # STARTTLS upgrade for plain connections when configured.
        if settings["EMAIL"].get("TLS"):
            self.server.starttls()
        self.server.ehlo()
        # Authenticate only when both USER and PASSWORD are configured.
        try:
            user = settings["EMAIL"]["USER"]
            password = settings["EMAIL"]["PASSWORD"]
        except KeyError:
            pass
        else:
            self.server.login(user, password)
    def __del__(self):
        "Close the connection to the email server."
        try:
            self.server.quit()
        except AttributeError:
            # The constructor failed before self.server was set.
            pass
    def send(self, recipient, subject, text):
        "Send an email."
        mail = email.mime.text.MIMEText(text, "plain", "utf-8")
        mail["Subject"] = subject
        mail["From"] = self.email
        mail["To"] = recipient
        self.server.sendmail(self.email, [recipient], mail.as_string())
class NocaseDict:
    """Dict-like mapping whose key lookups ignore case.
    The original key spellings are preserved for keys() and str()."""
    def __init__(self, orig):
        # Keep the caller's spellings, plus a lower-cased lookup table.
        self.orig = orig.copy()
        self.lower = {key.lower(): value for key, value in orig.items()}
    def keys(self):
        return list(self.orig)
    def __getitem__(self, key):
        return self.lower[key.lower()]
    def __setitem__(self, key, value):
        self.orig[key] = value
        self.lower[key.lower()] = value
    def __str__(self):
        return str({key: self[key] for key in self.keys()})
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
| |
import re
import unicodedata
from django import forms as django_forms
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
class FieldsetError(ValueError):
    "Error in fieldset configuration or rendering."
def slugify(value):
    """Slugify `value`, separating words with underscores instead of hyphens.
    NOTE(review): uses the `unicode` builtin and bytes/str mixing, so this is
    Python-2-only as written."""
    # stolen from django.template.defaultfilters.slugify
    # only difference is this function substitutes
    # spaces with underscores instead of hyphens
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
    return re.sub('[-\s]+', '_', value)
class Fieldset(object):
    "Simple iterable for holding fieldset information."
    def __init__(self, form, title=None, fields=(),
                 description=None, extra_content=None):
        # form: the Django form instance whose fields are grouped here.
        # title: optional heading for the fieldset.
        # fields: tuple of field names (in render order) in this fieldset.
        # description: optional descriptive text for the fieldset.
        # extra_content: arbitrary extra context for custom templates.
        self.form = form
        self.title = title
        self.fields = fields
        self.description = description
        self.extra_content = extra_content or {}
    def __iter__(self):
        "Iterates through fields in the fieldset."
        # Yield BoundField objects so templates can render each field the
        # same way Django's own form rendering does.
        for field in self.fields:
            yield django_forms.forms.BoundField(self.form,
                                                self.form.fields[field],
                                                field)
    def _html_output(self,
                     fieldset_html,
                     title_html,
                     description_html,
                     normal_row,
                     error_row,
                     row_ender,
                     help_text_html,
                     errors_on_separate_row,
                     top_errors=None,
                     error_class=django_forms.utils.ErrorList,
                     label_suffix=u':'):
        """Render this fieldset's fields using the given HTML fragments.

        Mirrors ``django.forms.BaseForm._html_output``, extended with
        fieldset-level ``fieldset_html``/``title_html``/``description_html``
        fragments.  When ``top_errors`` is None the fieldset is rendered
        standalone and emits its own top-level errors; otherwise
        hidden-field errors are appended to the caller-supplied list and
        the caller renders them.
        """
        output, hidden_fields = [], []
        # top_errors is not supplied when the
        # fieldset is rendered individually
        if top_errors is None:
            top_errors = []
            top_errors_on_fieldset = True
        else:
            top_errors_on_fieldset = False
        for bf in self:
            # Escape and cache in local variable.
            bf_errors = error_class([escape(error) for error in bf.errors])
            if bf.is_hidden:
                # Hidden fields cannot show errors inline, so surface them
                # among the top-level errors instead.
                if bf_errors:
                    top_errors.extend([u'(Hidden field %s) %s' %
                                       (bf.name, force_unicode(e))
                                       for e in bf_errors])
                hidden_fields.append(unicode(bf))
            else:
                if errors_on_separate_row and bf_errors:
                    output.append(error_row % force_unicode(bf_errors))
                if bf.label:
                    label = escape(force_unicode(bf.label))
                    # Only add the suffix if the label does not end in
                    # punctuation.
                    if label_suffix:
                        if label[-1] not in u':?.!':
                            label += label_suffix
                    label = bf.label_tag(label) or u''
                else:
                    label = u''
                if bf.field.help_text:
                    help_text = help_text_html % force_unicode(
                        bf.field.help_text)
                else:
                    help_text = u''
                output.append(normal_row % {'errors': force_unicode(bf_errors),
                                            'label': force_unicode(label),
                                            'field': unicode(bf),
                                            'help_text': help_text})
        if hidden_fields:
            # Insert any hidden fields in the last row.
            str_hidden = u''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>')
                # and insert the hidden fields.
                output[-1] = last_row[:-len(row_ender)] + \
                             str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append
                # the hidden fields.
                output.append(str_hidden)
        # Render fieldset
        if self.title:
            title = title_html % escape(force_unicode(self.title))
        else:
            title = u''
        if self.description:
            description = description_html % force_unicode(self.description)
        else:
            description = u''
        if top_errors_on_fieldset and top_errors:
            output.insert(0, error_row % force_unicode(top_errors))
        return mark_safe(fieldset_html % {'title': title,
                                          'description': description,
                                          'fields': u'\n'.join(output)})
    def as_table(self):
        "Returns this fieldset's fields rendered as HTML <tr>s -- " \
        "excluding the <table></table>."
        return self._html_output(*self.form._tmpl_table)
    def as_ul(self):
        "Returns this fieldset's fields rendered as HTML <li>s -- " \
        "excluding the <ul></ul>."
        return self._html_output(*self.form._tmpl_ul)
    def as_p(self):
        "Returns this fieldset's fields rendered as HTML <p>s."
        return self._html_output(*self.form._tmpl_p)
class FieldsetMixin(object):
    """Mixin adding fieldset validation and fieldset-aware rendering to a form.

    The form class using this mixin must define ``fieldsets`` as an iterable
    of ``(title, options)`` pairs, where ``options`` is a dict of keyword
    arguments for ``Fieldset`` and must contain a ``fields`` tuple.
    """
    # Template argument packs passed to Fieldset._html_output():
    # (fieldset_html, title_html, description_html, normal_row, error_row,
    #  row_ender, help_text_html, errors_on_separate_row)
    _tmpl_table = (
        u'<tr><th colspan="2">%(title)s%(description)s</th></tr>%(fields)s',
        u'<h2>%s</h2>',
        u'<div class="description">%s</div>',
        u'<tr><th>%(label)s</th><td>%(errors)s' \
        u'%(field)s%(help_text)s</td></tr>',
        u'<tr><td colspan="2">%s</td></tr>',
        u'</td></tr>',
        u'<br />%s',
        False,
    )
    _tmpl_ul = (
        u'<li>%(title)s%(description)s<ul>%(fields)s</ul></li>',
        u'<h2>%s</h2>',
        u'<div class="description">%s</div>',
        u'<li>%(errors)s%(label)s %(field)s%(help_text)s</li>',
        u'<li>%s</li>',
        u'</li>',
        u' %s',
        False,
    )
    _tmpl_p = (
        u'<div>%(title)s%(description)s%(fields)s</div>',
        u'<h2>%s</h2>',
        u'<div class="description">%s</div>',
        u'<p>%(label)s %(field)s%(help_text)s</p>',
        u'%s',
        u'</p>',
        u' %s',
        True,
    )
    def _validate_fieldsets(self):
        "Return True when every form field appears in exactly one fieldset."
        valid = False
        fields_defined = sum((fset[1]['fields'] for fset in self.fieldsets),
                             ())
        fields_set = set(fields_defined)
        # Fieldsets are valid if:
        # * Each field is defined in a Fieldset
        # * Each field is defined only once.
        if len(fields_defined) == len(fields_set) and \
           set(self.fields.keys()) == fields_set:
            valid = True
        return valid
    def validate_fieldsets(self, force=False):
        "Return ``True`` if ``fieldsets`` is defined properly."
        # BUG FIX: this previously tested hasattr(self, '__fieldsets_valid'),
        # but the attribute assigned below was name-mangled by the class body
        # (to _FieldsetMixin__fieldsets_valid), so the hasattr() check never
        # matched and validation re-ran on every call.  A single-underscore
        # attribute avoids mangling and makes the cache effective.
        if force or not hasattr(self, '_fieldsets_valid'):
            self._fieldsets_valid = self._validate_fieldsets()
        if not self._fieldsets_valid:
            raise FieldsetError('Fieldset definition for %s is invalid. ' \
                                'Each field must be defined in one and ' \
                                'only one Fieldset.' % self.__class__.__name__)
        else:
            return True
    def iter_fieldsets(self):
        "Iterates fieldsets."
        self.validate_fieldsets()
        for title, options in self.fieldsets:
            yield Fieldset(self, title, **options)
    @property
    def fieldset_dict(self):
        "Lazily built mapping of slugified fieldset title -> Fieldset."
        # No need to call validate_fieldsets() since we
        # are using iter_fieldsets.
        # Same name-mangling fix as in validate_fieldsets() above.
        if not hasattr(self, '_fieldset_dict'):
            self._fieldset_dict = dict((slugify(fset.title), fset) for \
                                       fset in self.iter_fieldsets())
        return self._fieldset_dict
    def _html_fieldset_output(self,
                              fieldset_html,
                              title_html,
                              description_html,
                              normal_row,
                              error_row,
                              row_ender,
                              help_text_html,
                              errors_on_separate_row):
        "Helper function for outputting fieldsets as HTML. " \
        "Used by as_fieldset_table(), as_fieldset_ul(), as_fieldset_p()."
        # Errors that should be displayed above all fields.
        top_errors = self.non_field_errors()
        output = []
        for fieldset in self.iter_fieldsets():
            # Each fieldset appends its hidden-field errors to the shared
            # top_errors list; they are rendered once, at the top.
            fieldset_output = fieldset._html_output(
                fieldset_html,
                title_html,
                description_html,
                normal_row,
                error_row,
                row_ender,
                help_text_html,
                errors_on_separate_row,
                top_errors,
                error_class=self.error_class,
                label_suffix=self.label_suffix)
            output.append(fieldset_output)
        if top_errors:
            output.insert(0, error_row % force_unicode(top_errors))
        return mark_safe(u'\n'.join(output))
    def as_fieldset_table(self):
        "Returns this form's fieldsets rendered as HTML <tr>s -- " \
        "excluding the <table></table>."
        return self._html_fieldset_output(*self._tmpl_table)
    def as_fieldset_ul(self):
        "Returns this form's fieldsets rendered as HTML <li>s -- " \
        "excluding the <ul></ul>."
        return self._html_fieldset_output(*self._tmpl_ul)
    def as_fieldset_p(self):
        "Returns this form's fieldsets rendered as HTML <p>s."
        return self._html_fieldset_output(*self._tmpl_p)
| |
#!/usr/bin/python -u
"""
Copyright (C) 2017 Jacksgong(blog.dreamtobe.cn)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# This python script used for generating mock resources for multiple modules on multiple projects.
import re
from os import makedirs, walk
from os.path import exists, isfile, join
from shutil import copyfile
from xml.etree.ElementTree import Element, SubElement, tostring
from res_utils import assemble_res_package_name_and_path, assemble_src_and_dst_path, \
assemble_src_and_dst_path_with_folder, add_one_res_value_to_target_map, find_package_name, scan_xml_string, \
mock_res_file, mock_res_content
# Matches a Java "package com.example;" declaration; group 1 = package name.
PACKAGE_PATH_RE = re.compile(r' *package *(.*) *;')
# Unqualified resource reference, e.g. R.layout.main -> (type, name).
R_REF = re.compile(r'R\.([a-z]*)\.(\w*)')
# Fully qualified reference, e.g. com.foo.R.id.x -> (package, type, name).
R_DIR_REF = re.compile(r'([a-zA-Z_\.]*)\.R\.([a-z]*)\.(\w*)')
# "import com.foo.R;" -> group 1 = the package owning the imported R class.
IMPORT_PACKAGE = re.compile(r'import (.*).R;')
# Any density-specific mipmap resource directory.  NOTE(review): "MITMAP"
# looks like a typo for "MIPMAP"; renaming would touch its call sites.
MITMAP_PATH_RE = re.compile(r'.*res/mipmap-.*dpi')
class CombineResGenerator:
    """Collects R.<type>.<name> references from Java sources across multiple
    repositories and generates matching mock resources (ids.xml / public.xml
    plus per-type stub files) so the modules can compile standalone.
    """

    # Whether stub resource files (layouts, drawables, strings, ...) should be
    # generated in addition to the id tables.
    need_mock_res = True

    def __init__(self):
        # FIX: these used to be mutable *class* attributes, so every instance
        # shared (and polluted) the same collections.  They are now
        # per-instance state; access through an instance is unchanged.
        # package,{{type, [name]}, {type, [name]}}
        self.r_res = {}
        # [packagename, src]
        self.attrs_res = list()
        self.mipmap_res = list()

    def scan(self, path_list):
        """Walk every repo in *path_list*, recording attrs.xml files, mipmap
        resources and every R.<type>.<name> reference found in .java files
        into self.r_res / self.attrs_res / self.mipmap_res.
        """
        r_res = self.r_res
        for repo_path in path_list:
            for subdir, dirs, files in walk(repo_path):
                for file_name in files:
                    if file_name == 'attrs.xml' and subdir.endswith('res/values'):
                        assemble_res_package_name_and_path(subdir, file_name, self.attrs_res)
                        continue
                    if MITMAP_PATH_RE.match(subdir):
                        assemble_res_package_name_and_path(subdir, file_name, self.mipmap_res)
                        continue
                    if not file_name.endswith('.java'):
                        continue
                    java_path = join(subdir, file_name)
                    default_r_package = None
                    in_import_area = False
                    in_coding_area = False
                    is_first_valid_line = True
                    is_in_note_area = False
                    print('scan R reference on ' + java_path)
                    # FIX: the file handle was previously never closed.
                    with open(java_path, "r") as java_file:
                        for line in java_file:
                            strip_line = line.strip()
                            if strip_line == '' or strip_line == '\n':
                                continue
                            # Track /* ... */ block comments so references
                            # inside them are ignored.
                            if strip_line.startswith('/*'):
                                is_in_note_area = True
                            if is_in_note_area and '*/' in strip_line:
                                is_in_note_area = False
                                continue
                            if is_in_note_area:
                                continue
                            if strip_line.startswith('//') or strip_line.startswith('*'):
                                continue
                            if is_first_valid_line:
                                # this line must be the package line.
                                is_first_valid_line = False
                                default_r_package_search = PACKAGE_PATH_RE.search(strip_line)
                                if default_r_package_search is None:
                                    exit(
                                        "can't find package declare for line[" + strip_line + "] on java-file: " + java_path)
                                default_r_package = default_r_package_search.groups()[0]
                            if not in_coding_area:
                                # The import section ends at the first
                                # non-import statement; after that we are in
                                # the coding area where R references count.
                                in_import = strip_line.startswith('import')
                                if in_import and not in_import_area:
                                    in_import_area = True
                                if not in_import and in_import_area:
                                    in_import_area = False
                                    in_coding_area = True
                            else:
                                package_name = default_r_package
                                r_list = list()
                                # Fully qualified refs: com.foo.R.type.name.
                                # NOTE(review): package_name deliberately keeps
                                # the last qualified package after this loop,
                                # matching the original behavior for mixed
                                # lines.
                                for package_name, r_type, r_name in R_DIR_REF.findall(strip_line):
                                    r_list.append([package_name, r_type, r_name])
                                # Unqualified refs resolve against the current
                                # package_name (declared or imported R).
                                for r_type, r_name in R_REF.findall(strip_line):
                                    r_list.append([package_name, r_type, r_name])
                                if not r_list:
                                    # not R line, pass
                                    continue
                                # De-duplicate repeated references on one line.
                                handled_r = list()
                                for package_name, r_type, r_name in r_list:
                                    if package_name is None or r_type is None or r_name is None:
                                        # not R line, pass
                                        continue
                                    if package_name + r_type + r_name in handled_r:
                                        continue
                                    handled_r.append(package_name + r_type + r_name)
                                    add_one_res_value_to_target_map(package_name, r_type, r_name, r_res)
                            if in_import_area:
                                # "import com.foo.R;" overrides the default
                                # package for unqualified references.
                                r_import = IMPORT_PACKAGE.search(strip_line)
                                if r_import is not None:
                                    default_r_package = r_import.groups()[0]

    def generate(self, root_dir, packagename_foldername_map):
        """Emit mock res/ folders under *root_dir* for every package collected
        by scan().  Returns a list of [module_folder_name, package_name].
        """
        r_module_folder_list = list()
        r_res = self.r_res
        un_duplicate_copy_mapping = list()
        for package_name in r_res:
            if package_name in packagename_foldername_map:
                module_folder_name = packagename_foldername_map[package_name]
            else:
                module_folder_name = package_name.replace(".", "_")
            # FIX: the membership test previously compared the folder *name*
            # against the [name, package] pairs stored in the list, so it
            # never matched and duplicate entries could be appended.
            if module_folder_name not in [entry[0] for entry in r_module_folder_list]:
                r_module_folder_list.append([module_folder_name, package_name])
            r_module_res_path = root_dir + "/" + module_folder_name + "/res/"
            r_module_values_path = r_module_res_path + "values/"
            if not exists(r_module_values_path):
                makedirs(r_module_values_path)
            r_id_xml_path = r_module_values_path + "ids.xml"
            r_public_xml_path = r_module_values_path + "public.xml"
            r_id_xml = None
            r_public_xml = None
            # Base value for generated public resource ids.  NOTE(review): it
            # restarts for every package, so ids are only unique per package.
            r_start_value = 0x25000000
            r_type_name_map = r_res[package_name]
            # [ori, dst]
            need_copy_file = list()
            need_close_res_files = list()
            for r_type in r_type_name_map:
                r_name_list = r_type_name_map[r_type]
                if r_type == "id":
                    if r_id_xml is None:
                        r_id_xml = Element('resources')
                else:
                    if r_public_xml is None:
                        r_public_xml = Element('resources')
                for r_name in r_name_list:
                    if r_name == 'class':
                        # "R.<type>.class" reflection artifacts, not resources.
                        continue
                    if r_type == "id":
                        print('add to ids.xml ' + r_name)
                        SubElement(r_id_xml, "item", name=r_name, type="id")
                    else:
                        r_start_value += 1
                        id_value = hex(r_start_value)
                        print('add to public.xml ' + id_value + ', ' + r_name + ', ' + r_type)
                        SubElement(r_public_xml, "public", id=id_value, name=r_name, type=r_type)
                        if not self.need_mock_res:
                            continue
                        # generate mock res stubs per resource type
                        if r_type == 'drawable':
                            mock_res_file(r_module_res_path, r_type, r_name,
                                          '<selector xmlns:android="http://schemas.android.com/apk/res/android"/>')
                        elif r_type == 'anim':
                            mock_res_file(r_module_res_path, r_type, r_name,
                                          '<translate xmlns:android="http://schemas.android.com/apk/res/android"/>')
                        elif r_type == 'layout':
                            mock_res_file(r_module_res_path, r_type, r_name,
                                          '<View xmlns:android="http://schemas.android.com/apk/res/android"\n' \
                                          '    android:layout_width="match_parent"\n' \
                                          '    android:layout_height="match_parent"/>')
                        elif r_type == 'xml':
                            mock_res_file(r_module_res_path, r_type, r_name,
                                          '<PreferenceScreen/>')
                        elif r_type == 'raw':
                            mock_res_file(r_module_res_path, r_type, r_name, 'mock')
                        elif r_type == 'color':
                            res_path = mock_res_content(r_module_values_path, 'colors', r_name, '<color name="',
                                                        '">#000</color>')
                            if res_path not in need_close_res_files:
                                need_close_res_files.append(res_path)
                        elif r_type == 'dimen':
                            res_path = mock_res_content(r_module_values_path, 'dimens', r_name, '<dimen name="',
                                                        '">0dp</dimen>')
                            if res_path not in need_close_res_files:
                                need_close_res_files.append(res_path)
                        elif r_type == 'string':
                            res_path = mock_res_content(r_module_values_path, 'strings', r_name,
                                                        '<string name="',
                                                        '">mock</string>')
                            if res_path not in need_close_res_files:
                                need_close_res_files.append(res_path)
                        elif r_type == 'style':
                            res_path = mock_res_content(r_module_values_path, 'styles', r_name, '<style name="',
                                                        '"/>')
                            if res_path not in need_close_res_files:
                                need_close_res_files.append(res_path)
                        elif r_type == 'menu':
                            mock_res_file(r_module_res_path, r_type, r_name,
                                          '<menu/>')
                        elif r_type == 'mipmap':
                            # Copy the real mipmap instead of mocking it.
                            assemble_src_and_dst_path_with_folder(r_module_res_path, r_type, r_name, 'png',
                                                                  package_name, un_duplicate_copy_mapping,
                                                                  self.mipmap_res, need_copy_file)
                        elif r_type == "styleable":
                            # Styleables need the real attrs.xml to compile.
                            dst_path = r_module_values_path + 'attrs.xml'
                            assemble_src_and_dst_path(dst_path, 'attrs.xml', package_name, un_duplicate_copy_mapping,
                                                      self.attrs_res, need_copy_file)
            if r_id_xml is not None:
                with open(r_id_xml_path, "w+") as res_file:
                    res_file.write(tostring(r_id_xml, 'utf-8'))
            if r_public_xml is not None:
                with open(r_public_xml_path, "w+") as res_file:
                    res_file.write(tostring(r_public_xml, 'utf-8'))
            # mock_res_content() leaves the <resources> element open; close
            # each touched values file exactly once.
            for need_close_file_path in need_close_res_files:
                with open(need_close_file_path, "a") as res_file:
                    res_file.write('</resources>')
            for ori_path, dst_path in need_copy_file:
                print("copy [" + ori_path + "] to " + "[" + dst_path + "]...")
                copyfile(ori_path, dst_path)
        return r_module_folder_list
| |
# Imports
import numpy as np
import copy
from .mapped_class import MappedClass
from .constraint import CamConstraint
from .. import utils as bvpu
from ..options import config
def parse_config_str(s, fn=float, to_array=False, marker=','):
    """Split *s* on *marker*, convert each piece with *fn*, and return the
    result as a list (or a numpy array when *to_array* is True)."""
    values = [fn(part) for part in s.split(marker)]
    return np.array(values) if to_array else values
# Defaults
# Camera and fixation defaults parsed from the [camera] section of the
# package config.  LOCATION / FIX_LOCATION are wrapped in a list to match
# the one-entry-per-keyframe format expected by Camera.
LOCATION = [parse_config_str(config.get('camera', 'location'))]
FIX_LOCATION = [parse_config_str(config.get('camera', 'fix_location'))]
LENS = float(config.get('camera', 'lens'))
CLIP = parse_config_str(config.get('camera', 'clip'))
# bpy / mathutils only exist when running inside Blender; the flag lets the
# rest of the module degrade gracefully outside Blender.
try:
    import bpy
    import mathutils as bmu
    is_blender = True
except ImportError:
    is_blender = False
class Camera(MappedClass):
    """Class to handle placement and fixation/angle of camera in a scene."""
    def __init__(self,
                 location=LOCATION,
                 fix_location=FIX_LOCATION,
                 rotation_euler=None,
                 frames=None,
                 fix_frames=None,
                 lens=LENS,
                 clip=CLIP,
                 ):
        """Class to handle placement and fixation/angle of camera in a scene.

        Parameters
        ----------
        location : list of tuples
            A list of positions for each of n keyframes, each specifying camera
            location as an (x, y, z) tuple.
        fix_location : list of tuples
            As location, but for the fixation target for the camera. Can be
            None, if `rotation_euler` is specified.
        rotation_euler : list of tuples
            Rotation of camera, specified in radians in an (x, y, z) tuple.
            Can be None, if `fix_location` is specified.
        frames : list | None
            A list of the keyframes at which to insert camera / fixation or
            camera angles. Position is linearly interpolated for all frames
            between the keyframes. If None, location is set for only one frame.
            Frame indices should start at 1, not zero.
        lens : scalar
            Focal length for camera lens
        clip : tuple
            Near, far clipping planes for camera
        """
        # Default camera parameters
        self.type = 'Camera'
        self._db_fields = []
        self._data_fields = ['location', 'fix_location', 'rotation_euler',
                             'frames', 'fix_frames', 'lens', 'clip']
        self._temp_fields = ['blender_camera', 'blender_fixation']
        # Mirror every constructor argument onto an attribute of the same name.
        inpt = locals()
        for k, v in inpt.items():
            if not k in ('self', 'type'):
                setattr(self, k, v)
        if self.frames is None or all([x == 1 for x in self.frames]):
            self.frames = (1,)
        if self.fix_frames is None:
            self.fix_frames = copy.copy(self.frames)
        # Handles to the Blender objects; filled in by place().
        self.blender_camera = None
        self.blender_fixation = None
    @property
    def n_loc(self):
        """Number of camera-location key points."""
        return 1 if self.location is None else len(self.location)
    @property
    def n_fix(self):
        """Number of fixation-location key points."""
        return 1 if self.fix_location is None else len(self.fix_location)
    @property
    def n_frames(self):
        """Total number of animation frames spanned by the keyframes."""
        return max(self.frames) - min(self.frames) + 1
    @property
    def n_keyframes(self):
        """Number of keyframes."""
        return len(self.frames)
    def __repr__(self):
        S = '\n~C~ Camera ~C~\n'
        S += 'Camera lens: %s, clipping: %s, frames: %s\n %d cam location key points\n %d fix location key points'%(str(self.lens),
            str(self.clip), str(self.frames), self.n_loc, self.n_fix)
        return S
    def place(self, name='000', draw_size=0.33, scn=None):
        """Places camera into Blender scene (only works within Blender)

        Parameters
        ----------
        name : string
            Name for Blender object. "cam_" is automatically prepended to the
            name. [get rid of "cam_" prepending??]
        draw_size : scalar
            Size of camera as drawn in scene.
        scn : bpy.data.scene instance
            Scene to which to add the camera.
        """
        if not is_blender:
            raise Exception("Cannot call place() outside blender!")
        if scn is None:
            scn = bpy.context.scene
        # Add camera
        cam_data = bpy.data.cameras.new('cam_{}'.format(name))
        cam = bpy.data.objects.new('cam_{}'.format(name), cam_data)
        # Make camera object present in scene; the object-linking API moved
        # to collections in Blender 2.80.
        if bpy.app.version < (2, 80, 0):
            link = scn.objects.link
        else:
            link = scn.collection.objects.link
        link(cam)
        self.blender_camera = cam
        # Set as active camera
        scn.camera = cam
        cam.location = self.location[0]
        cam.data.lens = self.lens
        cam.data.clip_start, cam.data.clip_end = self.clip
        if self.fix_location is None and self.rotation_euler is not None:
            # Set camera rotation
            cam.rotation_euler = self.rotation_euler[0]
            a = bvpu.blender.make_locrotscale_animation(self.frames,
                action_name='CamMotion', handle_type='VECTOR',
                location=self.location, rotation_euler=self.rotation_euler)
        elif self.fix_location is not None and self.rotation_euler is None:
            # Set camera fixation target location
            fix = bpy.data.objects.new('camtarget_{}'.format(name), None)
            fix.location = self.fix_location[0]
            if bpy.app.version < (2, 80, 0):
                fix.empty_draw_type = 'SPHERE'
                fix.empty_draw_size = draw_size
            else:
                fix.empty_display_type = 'SPHERE'
                fix.empty_display_size = draw_size
            link(fix)
            # Add camera constraints to look at target
            trk2 = cam.constraints.new('TRACK_TO')
            trk2.target = fix
            trk2.track_axis = 'TRACK_NEGATIVE_Z'
            trk2.up_axis = 'UP_Y'
            # Set camera motion (multiple camera positions for diff. frames)
            a = bvpu.blender.make_locrotscale_animation(self.frames,
                action_name='CamMotion', handle_type='VECTOR',
                location=self.location)
            f = bvpu.blender.make_locrotscale_animation(self.fix_frames,
                action_name='FixMotion', handle_type='AUTO',
                location=self.fix_location)
            fix.animation_data_create()
            fix.animation_data.action = f
            self.blender_fixation = fix
        else:
            raise ValueError(('To place a camera, either property `fix_location` or'
                              '`rotation_euler` must be specified!'))
        # Set camera animation action
        cam.animation_data_create()
        cam.animation_data.action = a
    def set_fixation_location(self, frames, locations):
        """Set locations for fixation target (as an animation)"""
        if self.blender_fixation is None:
            raise Exception("Only works within blender, with an already-instantiated camera!")
        fixation_action = bvpu.blender.make_locrotscale_animation(frames,
            action_name='FixMotion_update', handle_type='AUTO',
            location=locations)
        if self.blender_fixation.animation_data is None:
            self.blender_fixation.animation_data_create()
        self.blender_fixation.animation_data.action = fixation_action
    def place_stereo(self, disparity, layers=None, scn=None):
        """Add two cameras for stereo rendering.

        Returns two Blender Camera objects, separated by "disparity" (in
        Blender units). That is, left camera is at -disparity/2, right camera
        is at +disparity/2 from main camera

        Parameters
        ----------
        disparity : scalar, float
            distance in Blender units between left and right cameras
        layers : tuple, 20 long
            boolean values for whether camera is present on each of Blender's
            20 scene layers. If `None`, defaults to present on all layers.
        scn : bpy.data.scene instance
            Scene into which to insert cameras. `None` defaults to current
            scene.

        Notes
        -----
        There must be a single main camera in the scene first for this to work;
        left and right cameras will be parented to current camera.
        """
        if not is_blender:
            raise Exception("Cannot call place_stereo() while operating outside Blender!")
        if scn is None:
            scn = bpy.context.scene
        if layers is None:
            layers = tuple([True for x in range(20)])
        base_camera = [o for o in scn.objects if o.type == 'CAMERA']
        if len(base_camera) == 0:
            raise Exception('No camera in scene!')
        elif len(base_camera) > 1:
            raise Exception('More than 1 base camera in scene!')
        else:
            base_camera = base_camera[0]
        # FIX: this line was garbled in the original source and left
        # `rotation` undefined (NameError on every call).  Both stereo
        # cameras share the base camera's current rotation.
        rotation = tuple(base_camera.rotation_euler)
        # Parent two new cameras to the extant camera in the scene
        # Left camera
        left_cam_vector = bmu.Vector((-disparity/2.0, 0, 0))
        left_cam_location = base_camera.matrix_local*left_cam_vector
        bpy.ops.object.camera_add(location=left_cam_location,
                                  rotation=rotation,
                                  layers=layers)
        left_cam = bpy.context.object
        # Keep same camera props as main camera
        left_cam.data = base_camera.data
        # Instead of the next lines, it would seem better to use
        # `left_cam.parent = base_camera`, but that doesn't work for some
        # reason. It messes up the transformation of left_cam.
        # (Blender 2.77, July 2016)
        bvpu.blender.grab_only(base_camera)
        # NOTE(review): `select` and the `layers` kwarg are pre-2.80 APIs;
        # unlike place(), this method is Blender 2.7x-only as written.
        left_cam.select = True
        bpy.ops.object.parent_set()
        # Right camera
        right_cam_vector = bmu.Vector((disparity/2.0, 0, 0))
        right_cam_location = base_camera.matrix_local*right_cam_vector
        bpy.ops.object.camera_add(location=right_cam_location,
                                  rotation=rotation,
                                  layers=layers)
        right_cam = bpy.context.object
        right_cam.data = base_camera.data
        bvpu.blender.grab_only(base_camera)
        right_cam.select = True
        bpy.ops.object.parent_set()
        return left_cam, right_cam
| |
"""Generated message classes for resourceviews version v1beta1.
The Resource View API allows users to create and manage logical sets of Google
Compute Engine instances.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudsdk.third_party.apitools.base.protorpclite import messages as _messages
package = 'resourceviews'
# NOTE: the message classes below are autogenerated from the API discovery
# document -- regenerate rather than hand-editing field numbers.
class Label(_messages.Message):
  """The Label to be applied to the resource views.
  Fields:
    key: Key of the label.
    value: Value of the label.
  """
  key = _messages.StringField(1)
  value = _messages.StringField(2)
class RegionViewsAddResourcesRequest(_messages.Message):
  """The request to add resources to the resource view.
  Fields:
    resources: The list of resources to be added.
  """
  resources = _messages.StringField(1, repeated=True)
class RegionViewsInsertResponse(_messages.Message):
  """The response to a resource view insert request.
  Fields:
    resource: The resource view object inserted.
  """
  resource = _messages.MessageField('ResourceView', 1)
class RegionViewsListResourcesResponse(_messages.Message):
  """The response to the list resource request.
  Fields:
    members: The resources in the view.
    nextPageToken: A token used for pagination.
  """
  members = _messages.StringField(1, repeated=True)
  nextPageToken = _messages.StringField(2)
class RegionViewsListResponse(_messages.Message):
  """The response to the list resource view request.
  Fields:
    nextPageToken: A token used for pagination.
    resourceViews: The list of resource views that meet the criteria.
  """
  nextPageToken = _messages.StringField(1)
  resourceViews = _messages.MessageField('ResourceView', 2, repeated=True)
class RegionViewsRemoveResourcesRequest(_messages.Message):
  """The request to remove resources from the resource view.
  Fields:
    resources: The list of resources to be removed.
  """
  resources = _messages.StringField(1, repeated=True)
class ResourceView(_messages.Message):
  """The resource view object.
  Fields:
    creationTime: The creation time of the resource view.
    description: The detailed description of the resource view.
    id: [Output Only] The ID of the resource view.
    kind: Type of the resource.
    labels: The labels for events.
    lastModified: The last modified time of the view. Not supported yet.
    members: A list of all resources in the resource view.
    name: The name of the resource view.
    numMembers: The total number of resources in the resource view.
    selfLink: [Output Only] A self-link to the resource view.
  """
  creationTime = _messages.StringField(1)
  description = _messages.StringField(2)
  id = _messages.StringField(3)
  # Fixed resource-kind marker used by the API.
  kind = _messages.StringField(4, default=u'resourceviews#resourceView')
  labels = _messages.MessageField('Label', 5, repeated=True)
  lastModified = _messages.StringField(6)
  members = _messages.StringField(7, repeated=True)
  name = _messages.StringField(8)
  # Serialized as an unsigned 32-bit integer on the wire.
  numMembers = _messages.IntegerField(9, variant=_messages.Variant.UINT32)
  selfLink = _messages.StringField(10)
# Autogenerated request/response wrappers for the regionViews service
# methods (add/remove resources, delete, get, insert, list).
class ResourceviewsRegionViewsAddresourcesRequest(_messages.Message):
  """A ResourceviewsRegionViewsAddresourcesRequest object.
  Fields:
    projectName: The project name of the resource view.
    region: The region name of the resource view.
    regionViewsAddResourcesRequest: A RegionViewsAddResourcesRequest resource
      to be passed as the request body.
    resourceViewName: The name of the resource view.
  """
  projectName = _messages.StringField(1, required=True)
  region = _messages.StringField(2, required=True)
  regionViewsAddResourcesRequest = _messages.MessageField('RegionViewsAddResourcesRequest', 3)
  resourceViewName = _messages.StringField(4, required=True)
class ResourceviewsRegionViewsAddresourcesResponse(_messages.Message):
  """An empty ResourceviewsRegionViewsAddresources response."""
class ResourceviewsRegionViewsDeleteRequest(_messages.Message):
  """A ResourceviewsRegionViewsDeleteRequest object.
  Fields:
    projectName: The project name of the resource view.
    region: The region name of the resource view.
    resourceViewName: The name of the resource view.
  """
  projectName = _messages.StringField(1, required=True)
  region = _messages.StringField(2, required=True)
  resourceViewName = _messages.StringField(3, required=True)
class ResourceviewsRegionViewsDeleteResponse(_messages.Message):
  """An empty ResourceviewsRegionViewsDelete response."""
class ResourceviewsRegionViewsGetRequest(_messages.Message):
  """A ResourceviewsRegionViewsGetRequest object.
  Fields:
    projectName: The project name of the resource view.
    region: The region name of the resource view.
    resourceViewName: The name of the resource view.
  """
  projectName = _messages.StringField(1, required=True)
  region = _messages.StringField(2, required=True)
  resourceViewName = _messages.StringField(3, required=True)
class ResourceviewsRegionViewsInsertRequest(_messages.Message):
  """A ResourceviewsRegionViewsInsertRequest object.
  Fields:
    projectName: The project name of the resource view.
    region: The region name of the resource view.
    resourceView: A ResourceView resource to be passed as the request body.
  """
  projectName = _messages.StringField(1, required=True)
  region = _messages.StringField(2, required=True)
  resourceView = _messages.MessageField('ResourceView', 3)
class ResourceviewsRegionViewsListRequest(_messages.Message):
  """A ResourceviewsRegionViewsListRequest object.
  Fields:
    maxResults: Maximum count of results to be returned. Acceptable values are
      0 to 5000, inclusive. (Default: 5000)
    pageToken: Specifies a nextPageToken returned by a previous list request.
      This token can be used to request the next page of results from a
      previous list request.
    projectName: The project name of the resource view.
    region: The region name of the resource view.
  """
  maxResults = _messages.IntegerField(1, variant=_messages.Variant.INT32, default=5000)
  pageToken = _messages.StringField(2)
  projectName = _messages.StringField(3, required=True)
  region = _messages.StringField(4, required=True)
class ResourceviewsRegionViewsListresourcesRequest(_messages.Message):
  """A ResourceviewsRegionViewsListresourcesRequest object.
  Fields:
    maxResults: Maximum count of results to be returned. Acceptable values are
      0 to 5000, inclusive. (Default: 5000)
    pageToken: Specifies a nextPageToken returned by a previous list request.
      This token can be used to request the next page of results from a
      previous list request.
    projectName: The project name of the resource view.
    region: The region name of the resource view.
    resourceViewName: The name of the resource view.
  """
  maxResults = _messages.IntegerField(1, variant=_messages.Variant.INT32, default=5000)
  pageToken = _messages.StringField(2)
  projectName = _messages.StringField(3, required=True)
  region = _messages.StringField(4, required=True)
  resourceViewName = _messages.StringField(5, required=True)
class ResourceviewsRegionViewsRemoveresourcesRequest(_messages.Message):
  """A ResourceviewsRegionViewsRemoveresourcesRequest object.
  Fields:
    projectName: The project name of the resource view.
    region: The region name of the resource view.
    regionViewsRemoveResourcesRequest: A RegionViewsRemoveResourcesRequest
      resource to be passed as the request body.
    resourceViewName: The name of the resource view.
  """
  projectName = _messages.StringField(1, required=True)
  region = _messages.StringField(2, required=True)
  regionViewsRemoveResourcesRequest = _messages.MessageField('RegionViewsRemoveResourcesRequest', 3)
  resourceViewName = _messages.StringField(4, required=True)
class ResourceviewsRegionViewsRemoveresourcesResponse(_messages.Message):
  """An empty ResourceviewsRegionViewsRemoveresources response."""
class ResourceviewsZoneViewsAddresourcesRequest(_messages.Message):
"""A ResourceviewsZoneViewsAddresourcesRequest object.
Fields:
projectName: The project name of the resource view.
resourceViewName: The name of the resource view.
zone: The zone name of the resource view.
zoneViewsAddResourcesRequest: A ZoneViewsAddResourcesRequest resource to
be passed as the request body.
"""
projectName = _messages.StringField(1, required=True)
resourceViewName = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
zoneViewsAddResourcesRequest = _messages.MessageField('ZoneViewsAddResourcesRequest', 4)
class ResourceviewsZoneViewsAddresourcesResponse(_messages.Message):
"""An empty ResourceviewsZoneViewsAddresources response."""
class ResourceviewsZoneViewsDeleteRequest(_messages.Message):
  """A ResourceviewsZoneViewsDeleteRequest object.

  Fields:
    projectName: The project name of the resource view.
    resourceViewName: The name of the resource view.
    zone: The zone name of the resource view.
  """

  # All three fields are required URL path parameters.
  projectName = _messages.StringField(1, required=True)
  resourceViewName = _messages.StringField(2, required=True)
  zone = _messages.StringField(3, required=True)
class ResourceviewsZoneViewsDeleteResponse(_messages.Message):
  """An empty ResourceviewsZoneViewsDelete response."""
  # Intentionally empty: this API call returns no payload.
class ResourceviewsZoneViewsGetRequest(_messages.Message):
  """A ResourceviewsZoneViewsGetRequest object.

  Fields:
    projectName: The project name of the resource view.
    resourceViewName: The name of the resource view.
    zone: The zone name of the resource view.
  """

  # All three fields are required URL path parameters.
  projectName = _messages.StringField(1, required=True)
  resourceViewName = _messages.StringField(2, required=True)
  zone = _messages.StringField(3, required=True)
class ResourceviewsZoneViewsInsertRequest(_messages.Message):
  """A ResourceviewsZoneViewsInsertRequest object.

  Fields:
    projectName: The project name of the resource view.
    resourceView: A ResourceView resource to be passed as the request body.
    zone: The zone name of the resource view.
  """

  projectName = _messages.StringField(1, required=True)
  # The request body carries a full ResourceView message.
  resourceView = _messages.MessageField('ResourceView', 2)
  zone = _messages.StringField(3, required=True)
class ResourceviewsZoneViewsListRequest(_messages.Message):
  """A ResourceviewsZoneViewsListRequest object.

  Fields:
    maxResults: Maximum count of results to be returned. Acceptable values are
      0 to 5000, inclusive. (Default: 5000)
    pageToken: Specifies a nextPageToken returned by a previous list request.
      This token can be used to request the next page of results from a
      previous list request.
    projectName: The project name of the resource view.
    zone: The zone name of the resource view.
  """

  # Pagination parameters followed by the required path parameters.
  maxResults = _messages.IntegerField(1, variant=_messages.Variant.INT32, default=5000)
  pageToken = _messages.StringField(2)
  projectName = _messages.StringField(3, required=True)
  zone = _messages.StringField(4, required=True)
class ResourceviewsZoneViewsListresourcesRequest(_messages.Message):
  """A ResourceviewsZoneViewsListresourcesRequest object.

  Fields:
    maxResults: Maximum count of results to be returned. Acceptable values are
      0 to 5000, inclusive. (Default: 5000)
    pageToken: Specifies a nextPageToken returned by a previous list request.
      This token can be used to request the next page of results from a
      previous list request.
    projectName: The project name of the resource view.
    resourceViewName: The name of the resource view.
    zone: The zone name of the resource view.
  """

  # Pagination parameters followed by the required path parameters.
  maxResults = _messages.IntegerField(1, variant=_messages.Variant.INT32, default=5000)
  pageToken = _messages.StringField(2)
  projectName = _messages.StringField(3, required=True)
  resourceViewName = _messages.StringField(4, required=True)
  zone = _messages.StringField(5, required=True)
class ResourceviewsZoneViewsRemoveresourcesRequest(_messages.Message):
  """A ResourceviewsZoneViewsRemoveresourcesRequest object.

  Fields:
    projectName: The project name of the resource view.
    resourceViewName: The name of the resource view.
    zone: The zone name of the resource view.
    zoneViewsRemoveResourcesRequest: A ZoneViewsRemoveResourcesRequest
      resource to be passed as the request body.
  """

  projectName = _messages.StringField(1, required=True)
  resourceViewName = _messages.StringField(2, required=True)
  zone = _messages.StringField(3, required=True)
  # The request body listing the resources to remove.
  zoneViewsRemoveResourcesRequest = _messages.MessageField('ZoneViewsRemoveResourcesRequest', 4)
class ResourceviewsZoneViewsRemoveresourcesResponse(_messages.Message):
  """An empty ResourceviewsZoneViewsRemoveresources response."""
  # Intentionally empty: this API call returns no payload.
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.

  Enums:
    AltValueValuesEnum: Data format for the response.

  Fields:
    alt: Data format for the response.
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters. Overrides userIp if both are provided.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    userIp: IP address of the site where the request originates. Use this if
      you want to enforce per-user limits.
  """

  class AltValueValuesEnum(_messages.Enum):
    """Data format for the response.

    Values:
      json: Responses with Content-Type of application/json
    """
    # Only JSON responses are supported by this API surface.
    json = 0

  alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
  fields = _messages.StringField(2)
  key = _messages.StringField(3)
  oauth_token = _messages.StringField(4)
  prettyPrint = _messages.BooleanField(5, default=True)
  quotaUser = _messages.StringField(6)
  trace = _messages.StringField(7)
  userIp = _messages.StringField(8)
class ZoneViewsAddResourcesRequest(_messages.Message):
  """The request to add resources to the resource view.

  Fields:
    resources: The list of resources to be added.
  """

  # Repeated field: each entry is one resource URL.
  resources = _messages.StringField(1, repeated=True)
class ZoneViewsInsertResponse(_messages.Message):
  """The response to an insert request.

  Fields:
    resource: The resource view object that has been inserted.
  """

  resource = _messages.MessageField('ResourceView', 1)
class ZoneViewsListResourcesResponse(_messages.Message):
  """The response to a list resource request.

  Fields:
    members: The full URL of resources in the view.
    nextPageToken: A token used for pagination.
  """

  members = _messages.StringField(1, repeated=True)
  # Absent/empty token means there are no further pages.
  nextPageToken = _messages.StringField(2)
class ZoneViewsListResponse(_messages.Message):
  """The response to a list request.

  Fields:
    nextPageToken: A token used for pagination.
    resourceViews: The result that contains all resource views that meet the
      criteria.
  """

  # Absent/empty token means there are no further pages.
  nextPageToken = _messages.StringField(1)
  resourceViews = _messages.MessageField('ResourceView', 2, repeated=True)
class ZoneViewsRemoveResourcesRequest(_messages.Message):
  """The request to remove resources from the resource view.

  Fields:
    resources: The list of resources to be removed.
  """

  # Repeated field: each entry is one resource URL.
  resources = _messages.StringField(1, repeated=True)
| |
#!python
#
# Format Identification for Digital Objects
import cStringIO, zipfile, os
from xml.etree import ElementTree as ET
class NS:
    """Helper class for XML name spaces in ElementTree.

    Use like ``MYNS = NS("{http://some/uri}")`` and then ``MYNS.tag1`` or
    ``MYNS("tag1/tag2")`` to build fully-qualified tag names and paths.
    """

    def __init__(self, uri):
        # Clark-notation namespace prefix, e.g. "{http://some/uri}".
        self.uri = uri

    def __getattr__(self, tag):
        # Any unknown attribute access is interpreted as a tag to qualify.
        return self.uri + tag

    def __call__(self, path):
        # Qualify each step of a slash-separated path.
        steps = path.split("/")
        return "/".join(getattr(self, step) for step in steps)
# XHTML namespace
XHTML = NS("{http://www.w3.org/1999/xhtml}")
# TNA (The National Archives / PRONOM) namespace
TNA = NS("{http://pronom.nationalarchives.gov.uk}")
def get_text_tna(element, tag, default=''):
    """Return the stripped text for *tag* (TNA namespace) under *element*.

    Falls back to *default* when the tag is absent or carries no text.
    """
    part = element.find(TNA(tag))
    # Identity comparisons are required here: ElementTree elements are falsy
    # when they have no children, so neither truthiness nor `!= None` is safe.
    if part is not None and part.text is not None:
        return part.text.strip()
    return default
def prettify(elem):
    """Return a pretty-printed XML string for the Element *elem*."""
    from xml.dom import minidom
    # Round-trip through minidom, which knows how to indent.
    serialized = ET.tostring(elem, 'UTF-8')
    return minidom.parseString(serialized).toprettyxml(indent=" ")
class FormatInfo:
    """Converts PRONOM report XML (read from a zip archive) into fido's own
    format-definition XML.
    """

    def __init__(self, pronom_files, format_list=[]):
        # NOTE(review): mutable default argument; harmless only while the
        # default list is never mutated.  `add_format` is not defined in this
        # class as shown here -- presumably provided elsewhere; confirm.
        self.info = {}
        self.formats = []
        self.pronom_files = pronom_files
        for f in format_list:
            self.add_format(f)

    def save(self, dst):
        """Write the fido XML format definitions to @param dst
        """
        tree = ET.ElementTree(ET.Element('formats', {'version':'0.2',
            'xmlns:xsi' : "http://www.w3.org/2001/XMLSchema-instance",
            'xsi:noNamespaceSchemaLocation': "fido-formats.xsd"}))
        root = tree.getroot()
        for f in self.formats:
            # Only formats with a signature child are written out.
            # NOTE(review): ET truth-testing checks child count, so an empty
            # <signature/> element would be skipped here too -- confirm intended.
            if f.find('signature'):
                root.append(f)
        with open(dst, 'wb') as out:
            # Python 2 print-to-file syntax (this file is Python 2 throughout).
            print >>out, ET.tostring(root,encoding='UTF-8')

    def load_pronom_xml(self):
        """Load the pronom XML from self.pronom_files and convert it to fido XML.

        As a side-effect, set self.formats to a list of ElementTree.Element.
        """
        formats = []
        try:
            # NOTE(review): `zip` shadows the builtin; `stream` is unbound in
            # the inner finally if zip.open itself raises.
            zip = zipfile.ZipFile(self.pronom_files, 'r')
            for item in zip.infolist():
                try:
                    stream = zip.open(item)
                    # Work is done here!
                    formats.append(self.parse_pronom_xml(stream))
                finally:
                    stream.close()
        finally:
            zip.close()
        # Replace the formatID with puids in has_priority_over
        id_map = {}
        for element in formats:
            puid = element.find('puid').text
            pronom_id = element.find('pronom_id').text
            id_map[pronom_id] = puid
        for element in formats:
            for rel in element.findall('has_priority_over'):
                rel.text = id_map[rel.text]
        # NOTE(review): _sort_formats returns a new sorted list without
        # mutating its argument, so this call currently has no effect (see
        # the FIXME on _sort_formats).
        self._sort_formats(formats)
        self.formats = formats

    def parse_pronom_xml(self, source):
        """Read a pronom XML from @param source, convert it to fido XML and
        @return ET.ElementTree Element representing it.
        """
        pronom_xml = ET.parse(source)
        pronom_root = pronom_xml.getroot()
        pronom_format = pronom_root.find(TNA('report_format_detail/FileFormat'))
        fido_format = ET.Element('format')
        # Get the base Format information
        for id in pronom_format.findall(TNA('FileFormatIdentifier')):
            type = get_text_tna(id, 'IdentifierType')
            if type == 'PUID':
                puid = get_text_tna(id, 'Identifier')
                ET.SubElement(fido_format, 'puid').text = puid
        # A bit clumsy. I want to have puid first, then mime, then container.
        for id in pronom_format.findall(TNA('FileFormatIdentifier')):
            type = get_text_tna(id, 'IdentifierType')
            if type == 'MIME':
                ET.SubElement(fido_format, 'mime').text = get_text_tna(id, 'Identifier')
            elif type == 'PUID':
                puid = get_text_tna(id, 'Identifier')
                # Hard-coded container formats: zip and tar archives.
                if puid == 'x-fmt/263':
                    ET.SubElement(fido_format, 'container').text = 'zip'
                elif puid == 'x-fmt/265':
                    ET.SubElement(fido_format, 'container').text = 'tar'
        ET.SubElement(fido_format, 'name').text = get_text_tna(pronom_format, 'FormatName')
        ET.SubElement(fido_format, 'pronom_id').text = get_text_tna(pronom_format, 'FormatID')
        # Get the extensions from the ExternalSignature
        for x in pronom_format.findall(TNA('ExternalSignature')):
            ET.SubElement(fido_format, 'extension').text = get_text_tna(x, 'Signature')
        # Handle the relationships
        for x in pronom_format.findall(TNA('RelatedFormat')):
            rel = get_text_tna(x, 'RelationshipType')
            if rel == 'Has priority over':
                ET.SubElement(fido_format, 'has_priority_over').text = get_text_tna(x, 'RelatedFormatID')
        # Get the InternalSignature information
        for pronom_sig in pronom_format.findall(TNA('InternalSignature')):
            fido_sig = ET.SubElement(fido_format, 'signature')
            ET.SubElement(fido_sig, 'name').text = get_text_tna(pronom_sig, 'SignatureName')
            # There are some funny chars in the notes, which caused me trouble and it is a unicode string,
            ET.SubElement(fido_sig, 'note').text = get_text_tna(pronom_sig, 'SignatureNote').encode('UTF-8')
            for pronom_pat in pronom_sig.findall(TNA('ByteSequence')):
                fido_pat = ET.SubElement(fido_sig, 'pattern')
                pos = fido_position(get_text_tna(pronom_pat, 'PositionType'))
                bytes = get_text_tna(pronom_pat, 'ByteSequenceValue')
                offset = get_text_tna(pronom_pat, 'Offset')
                max_offset = get_text_tna(pronom_pat, 'MaxOffset')
                # NOTE(review): no-op branch; MaxOffset handling actually
                # happens inside convert_to_regex.
                if max_offset == None:
                    pass
                regex = convert_to_regex(bytes, 'Little', pos, offset, max_offset)
                ET.SubElement(fido_pat, 'position').text = pos
                ET.SubElement(fido_pat, 'pronom_pattern').text = bytes
                ET.SubElement(fido_pat, 'regex').text = regex
        return fido_format

    #FIXME: I don't think that this quite works yet!
    def _sort_formats(self, formatlist):
        """Sort the format list based on their priority relationships so higher priority
        formats appear earlier in the list.
        """
        def compare_formats(f1, f2):
            # Python 2 cmp-style comparator: negative => f1 sorts first.
            f1ID = f1.find('puid').text
            f2ID = f2.find('puid').text
            for worse in f1.findall('has_priority_over'):
                if worse.text == f2ID:
                    return - 1
            for worse in f2.findall('has_priority_over'):
                if worse.text == f1ID:
                    return 1
            if f1ID < f2ID:
                return - 1
            elif f1ID == f2ID:
                return 0
            else:
                return 1
        # Python 2 sorted(cmp=...); returns a new list, argument unchanged.
        return sorted(formatlist, cmp=compare_formats)
def fido_position(pronom_position):
    """Map a verbose PRONOM position name to the short BOF/EOF/VAR form."""
    translation = {
        'Absolute from BOF': 'BOF',
        'Absolute from EOF': 'EOF',
        'Variable': 'VAR',
    }
    try:
        return translation[pronom_position]
    except KeyError:
        raise Exception("Unknown pronom PositionType=" + pronom_position)
def _convert_err_msg(msg, c, i, chars):
return "Conversion: {0}: char='{1}', at pos {2} in \n {3}\n {4}^".format(msg, c, i, chars, i * ' ')
def doByte(chars, i, littleendian):
    """Convert the two hex digits chars[i] and chars[i+1] into one escaped byte.

    @return a tuple (escaped_byte_string, 2) -- 2 is how many chars were eaten.
    """
    digits = '0123456789ABCDEF'
    hi = digits.find(chars[i].upper())
    lo = digits.find(chars[i + 1].upper())
    if hi < 0 or lo < 0:
        raise Exception(_convert_err_msg('bad byte sequence', chars[i:i + 2], i, chars))
    # Nibble order depends on the requested endianness.
    value = 16 * hi + lo if littleendian else hi + 16 * lo
    return (escape(chr(value)), 2)
# \a\b\n\r\t\v
# Characters that may appear literally in a regex without escaping.
_ordinary = frozenset(' !"#%&\',-/0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ_`abcdefghijklmnopqrstuvwxyz~')
# Regex metacharacters that must be backslash-escaped.
_special = '$()*+.?[]^\\{|}'
# Lowercase hex digits used when emitting \xNN escapes.
_hex = '0123456789abcdef'
def _escape_char(c):
    """Return a regex-safe escape for the single character *c*."""
    # Equality, not `c in '\n'`: the sibling '\r' branch already uses `==`,
    # and `in` would wrongly match an empty string.
    if c == '\n':
        return '\\n'
    elif c == '\r':
        return '\\r'
    elif c in _special:
        return '\\' + c
    else:
        # Everything else becomes a two-digit hex escape.
        (high, low) = divmod(ord(c), 16)
        return '\\x' + _hex[high] + _hex[low]
def escape(string):
    "Escape characters in pattern that are non-printable, non-ascii, or special for regexes."
    chunks = []
    for ch in string:
        chunks.append(ch if ch in _ordinary else _escape_char(ch))
    return ''.join(chunks)
def convert_to_regex(chars, endianness='', pos='BOF', offset='0', maxoffset=''):
    """Convert
    @param chars, a pronom bytesequence, into a
    @return regular expression.

    Implemented as a small state machine over the PRONOM byte-sequence
    grammar: hex byte pairs, [a:b] ranges, [!..] negations, (a|b)
    alternatives, {n}/{n-m} gaps and the *, +, ?? specials.
    Endianness is not used.
    """
    if 'Big' in endianness:
        littleendian = False
    else:
        littleendian = True
    # Normalise empty offsets: '' means offset 0 and no maximum.
    if len(offset) == 0:
        offset = '0'
    if len(maxoffset) == 0:
        maxoffset = None
    buf = cStringIO.StringIO()
    buf.write("(?s)") #If a regex starts with (?s), it is equivalent to DOTALL.
    i = 0
    state = 'start'
    # Anchor at the beginning-of-file, with an optional offset gap.
    if 'BOF' in pos:
        buf.write('\\A')
        if offset != '0':
            buf.write('.{')
            buf.write(str(offset))
            if maxoffset != None:
                buf.write(',' + maxoffset)
            buf.write('}')
        elif maxoffset != None:
            buf.write('.{0,' + maxoffset + '}')
    while True:
        if i == len(chars):
            break
        #print _convert_err_msg(state,chars[i],i,chars)
        if state == 'start':
            # Dispatch on the current character to pick the next state.
            if chars[i].isalnum():
                state = 'bytes'
            elif chars[i] == '[' and chars[i + 1] == '!':
                state = 'non-match'
            elif chars[i] == '[':
                state = 'bracket'
            elif chars[i] == '{':
                state = 'curly'
            elif chars[i] == '(':
                state = 'paren'
            elif chars[i] in '*+?':
                state = 'specials'
            else:
                raise Exception(_convert_err_msg('Illegal character in start', chars[i], i, chars))
        elif state == 'bytes':
            # One literal byte given as two hex digits.
            (byt, inc) = doByte(chars, i, littleendian)
            buf.write(byt)
            i += inc
            state = 'start'
        elif state == 'non-match':
            # [!...] — bytes that must NOT match.
            buf.write('(!')
            i += 2
            while True:
                if chars[i].isalnum():
                    (byt, inc) = doByte(chars, i, littleendian)
                    buf.write(byt)
                    i += inc
                elif chars[i] == ']':
                    break
                else:
                    raise Exception(_convert_err_msg('Illegal character in non-match', chars[i], i, chars))
            buf.write(')')
            i += 1
            state = 'start'
        elif state == 'bracket':
            # [xx:yy] — an inclusive byte range.
            try:
                buf.write('[')
                i += 1
                (byt, inc) = doByte(chars, i, littleendian)
                buf.write(byt)
                i += inc
                assert(chars[i] == ':')
                buf.write('-')
                i += 1
                (byt, inc) = doByte(chars, i, littleendian)
                buf.write(byt)
                i += inc
                assert(chars[i] == ']')
                buf.write(']')
                i += 1
            except Exception:
                print _convert_err_msg('Illegal character in bracket', chars[i], i, chars)
                raise
            if i < len(chars) and chars[i] == '{':
                state = 'curly-after-bracket'
            else:
                state = 'start'
        elif state == 'paren':
            # (aa|bb|...) — alternatives; emitted as a non-capturing group.
            buf.write('(?:')
            i += 1
            while True:
                if chars[i].isalnum():
                    (byt, inc) = doByte(chars, i, littleendian)
                    buf.write(byt)
                    i += inc
                elif chars[i] == '|':
                    buf.write('|')
                    i += 1
                elif chars[i] == ')':
                    break
                else:
                    raise Exception(_convert_err_msg('Illegal character in paren', chars[i], i, chars))
            buf.write(')')
            i += 1
            state = 'start'
        elif state in ['curly', 'curly-after-bracket']:
            # {nnnn} or {nnn-nnn} or {nnn-*}
            # {nnn} or {nnn,nnn} or {nnn,}
            # when there is a curly-after-bracket, then the {m,n} applies to the bracketed item
            # The above, while sensible, appears to be incorrect. A '.' is always needed.
            # for droid equiv behavior
            #if state == 'curly':
            buf.write('.')
            buf.write('{')
            i += 1 # skip the (
            while True:
                if chars[i].isalnum():
                    buf.write(chars[i])
                    i += 1
                elif chars[i] == '-':
                    buf.write(',')
                    i += 1
                elif chars[i] == '*': # skip the *
                    i += 1
                elif chars[i] == '}':
                    break
                else:
                    raise Exception(_convert_err_msg('Illegal character in curly', chars[i], i, chars))
            buf.write('}')
            i += 1 # skip the )
            state = 'start'
        elif state == 'specials':
            # * -> .*, + -> .+, ?? -> .? (a single ? is illegal in PRONOM).
            if chars[i] == '*':
                buf.write('.*')
                i += 1
            elif chars[i] == '+':
                buf.write('.+')
                i += 1
            elif chars[i] == '?':
                if chars[i + 1] != '?':
                    raise Exception(_convert_err_msg('Illegal character after ?', chars[i + 1], i + 1, chars))
                buf.write('.?')
                i += 2
            state = 'start'
        else:
            raise Exception('Illegal state {0}'.format(state))
    # Anchor at the end-of-file, mirroring the BOF offset handling.
    if 'EOF' in pos:
        if offset != '0':
            buf.write('.{' + offset)
            if maxoffset != None:
                buf.write(',' + maxoffset)
            buf.write('}')
        elif maxoffset != None:
            buf.write('.{0,' + maxoffset + '}')
        buf.write('\\Z')
    val = buf.getvalue()
    buf.close()
    return val
if __name__ == '__main__':
    # Command-line entry point (Python 2 script): regenerate fido's
    # formats.xml from the bundled PRONOM zip.
    import sys
    from argparselocal import ArgumentParser
    arglist = sys.argv[1:]
    mydir = os.path.abspath(os.path.dirname(__file__))
    parser = ArgumentParser(description='Produce the fido format xml that is loaded at run-time')
    parser.add_argument('-input', default=os.path.join(mydir, 'conf', 'pronom-xml.zip'), help='input file, a zip containing Pronom xml files')
    parser.add_argument('-output', default=os.path.join(mydir, 'conf', 'formats.xml'), help='output file')
    # PROCESS ARGUMENTS
    args = parser.parse_args(arglist)
    # print os.path.abspath(args.input), os.path.abspath(args.output)
    info = FormatInfo(args.input)
    info.load_pronom_xml()
    info.save(args.output)
    # Summary on stderr so stdout stays clean (Python 2 print syntax).
    print >> sys.stderr, 'FIDO: {0} formats'.format(len(info.formats))
| |
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import inspect
import unittest
import oslo_db.exception
import sqlalchemy as sql
from tricircle.common import context
from tricircle.common import exceptions
from tricircle.db import api
from tricircle.db import core
from tricircle.db import models
def _get_field_value(column):
    """Get field value for resource creating

    returning None indicates that not setting this field in resource dict
    """
    if column.nullable:
        # just skip nullable column
        return None
    # Ordered (type, sample-factory) pairs; sql.Text must precede sql.String
    # because Text is a String subclass in SQLAlchemy.
    type_samples = (
        (sql.Text, lambda col: 'fake_text'),
        (sql.Enum, lambda col: col.type.enums[0]),
        (sql.String, lambda col: 'fake_str'),
        (sql.Integer, lambda col: 1),
        (sql.Float, lambda col: 1.0),
        (sql.Boolean, lambda col: True),
        (sql.DateTime, lambda col: datetime.datetime.utcnow()),
    )
    for sql_type, make_value in type_samples:
        if isinstance(column.type, sql_type):
            return make_value(column)
    return None
def _construct_resource_dict(resource_class):
    """Build a creation dict for *resource_class* from sample field values.

    Fields whose sample value is None are omitted.
    """
    candidates = (
        (name, _get_field_value(attr))
        for name, attr in inspect.getmembers(resource_class)
        if name in resource_class.attributes
    )
    return {name: value for name, value in candidates if value is not None}
def _sort_model_by_foreign_key(resource_class_list):
    """Apply topology sorting to obey foreign key constraints"""
    relation_map = {}
    table_map = {}
    # {table: (set(depend_on_table), set(depended_by_table))}
    for resource_class in resource_class_list:
        table = resource_class.__tablename__
        if table not in relation_map:
            relation_map[table] = (set(), set())
        if table not in table_map:
            table_map[table] = resource_class
        # Scan every mapped attribute for foreign keys and record both
        # directions of each dependency edge.
        for field in inspect.getmembers(resource_class):
            if field[0] in resource_class.attributes:
                f_keys = field[1].foreign_keys
                for f_key in f_keys:
                    f_table = f_key.column.table.name
                    # just skip self reference
                    if table == f_table:
                        continue
                    relation_map[table][0].add(f_table)
                    if f_table not in relation_map:
                        relation_map[f_table] = (set(), set())
                    relation_map[f_table][1].add(table)
    sorted_list = []
    total = len(relation_map)
    # Kahn-style elimination: repeatedly pick a table with no unresolved
    # dependencies and remove it from everyone else's depend-on set.
    # NOTE(review): a dependency cycle would leave candidate_table as None
    # and crash the `del` below -- presumably the models are acyclic; confirm.
    while len(sorted_list) < total:
        candidate_table = None
        for table in relation_map:
            # no depend-on table
            if not relation_map[table][0]:
                candidate_table = table
                sorted_list.append(candidate_table)
                for _table in relation_map[table][1]:
                    relation_map[_table][0].remove(table)
                break
        del relation_map[candidate_table]
    return [table_map[table] for table in sorted_list]
class ModelsTest(unittest.TestCase):
    """Exercises the tricircle DB models through the api/core helpers."""

    def setUp(self):
        # Fresh schema for every test; tearDown drops it again.
        core.initialize()
        core.ModelBase.metadata.create_all(core.get_engine())
        self.context = context.Context()

    def test_obj_to_dict(self):
        # from_dict must round-trip every declared attribute.
        pod = {'pod_id': 'test_pod_uuid',
               'region_name': 'test_pod',
               'pod_az_name': 'test_pod_az_name',
               'dc_name': 'test_dc_name',
               'az_name': 'test_az_uuid'}
        pod_obj = models.Pod.from_dict(pod)
        for attr in pod_obj.attributes:
            self.assertEqual(getattr(pod_obj, attr), pod[attr])

    def test_create(self):
        pod = {'pod_id': 'test_pod_uuid',
               'region_name': 'test_pod',
               'pod_az_name': 'test_pod_az_name',
               'dc_name': 'test_dc_name',
               'az_name': 'test_az_uuid'}
        pod_ret = api.create_pod(self.context, pod)
        self.assertEqual(pod_ret, pod)
        configuration = {
            'service_id': 'test_config_uuid',
            'pod_id': 'test_pod_uuid',
            'service_type': 'nova',
            'service_url': 'http://test_url'
        }
        config_ret = api.create_cached_endpoints(self.context,
                                                 configuration)
        self.assertEqual(config_ret, configuration)

    def test_update(self):
        pod = {'pod_id': 'test_pod_uuid',
               'region_name': 'test_pod',
               'az_name': 'test_az1_uuid'}
        api.create_pod(self.context, pod)
        update_dict = {'pod_id': 'fake_uuid',
                       'region_name': 'test_pod2',
                       'az_name': 'test_az2_uuid'}
        ret = api.update_pod(self.context, 'test_pod_uuid', update_dict)
        # primary key value will not be updated
        self.assertEqual(ret['pod_id'], 'test_pod_uuid')
        self.assertEqual(ret['region_name'], 'test_pod2')
        self.assertEqual(ret['az_name'], 'test_az2_uuid')

    def test_delete(self):
        pod = {'pod_id': 'test_pod_uuid',
               'region_name': 'test_pod',
               'az_name': 'test_az_uuid'}
        api.create_pod(self.context, pod)
        api.delete_pod(self.context, 'test_pod_uuid')
        # Reading a deleted pod must raise, not return None.
        self.assertRaises(exceptions.ResourceNotFound, api.get_pod,
                          self.context, 'test_pod_uuid')

    def test_query(self):
        pod1 = {'pod_id': 'test_pod1_uuid',
                'region_name': 'test_pod1',
                'pod_az_name': 'test_pod_az_name1',
                'dc_name': 'test_dc_name1',
                'az_name': 'test_az1_uuid'}
        pod2 = {'pod_id': 'test_pod2_uuid',
                'region_name': 'test_pod2',
                'pod_az_name': 'test_pod_az_name2',
                'dc_name': 'test_dc_name1',
                'az_name': 'test_az2_uuid'}
        api.create_pod(self.context, pod1)
        api.create_pod(self.context, pod2)
        # Equality filter with a match ...
        filters = [{'key': 'region_name',
                    'comparator': 'eq',
                    'value': 'test_pod2'}]
        pods = api.list_pods(self.context, filters)
        self.assertEqual(len(pods), 1)
        self.assertEqual(pods[0], pod2)
        # ... and without a match.
        filters = [{'key': 'region_name',
                    'comparator': 'eq',
                    'value': 'test_pod3'}]
        pods = api.list_pods(self.context, filters)
        self.assertEqual(len(pods), 0)

    def test_sort(self):
        pod1 = {'pod_id': 'test_pod1_uuid',
                'region_name': 'test_pod1',
                'pod_az_name': 'test_pod_az_name1',
                'dc_name': 'test_dc_name1',
                'az_name': 'test_az1_uuid'}
        pod2 = {'pod_id': 'test_pod2_uuid',
                'region_name': 'test_pod2',
                'pod_az_name': 'test_pod_az_name2',
                'dc_name': 'test_dc_name1',
                'az_name': 'test_az2_uuid'}
        pod3 = {'pod_id': 'test_pod3_uuid',
                'region_name': 'test_pod3',
                'pod_az_name': 'test_pod_az_name3',
                'dc_name': 'test_dc_name1',
                'az_name': 'test_az3_uuid'}
        pods = [pod1, pod2, pod3]
        for pod in pods:
            api.create_pod(self.context, pod)
        # (column, asc=False) => descending order by pod_id.
        pods = api.list_pods(self.context,
                             sorts=[(models.Pod.pod_id, False)])
        self.assertEqual(pods, [pod3, pod2, pod1])

    def test_resources(self):
        """Create all the resources to test model definition"""
        try:
            model_list = []
            for _, model_class in inspect.getmembers(models):
                if inspect.isclass(model_class) and (
                        issubclass(model_class, core.ModelBase)):
                    model_list.append(model_class)
            # Creation order must satisfy foreign-key constraints.
            for model_class in _sort_model_by_foreign_key(model_list):
                create_dict = _construct_resource_dict(model_class)
                with self.context.session.begin():
                    core.create_resource(
                        self.context, model_class, create_dict)
        except Exception as e:
            msg = str(e)
            self.fail('test_resources raised Exception unexpectedly %s' % msg)

    def test_resource_routing_unique_key(self):
        pod = {'pod_id': 'test_pod1_uuid',
               'region_name': 'test_pod1',
               'az_name': 'test_az1_uuid'}
        api.create_pod(self.context, pod)
        routing = {'top_id': 'top_uuid',
                   'pod_id': 'test_pod1_uuid',
                   'resource_type': 'port'}
        with self.context.session.begin():
            core.create_resource(self.context, models.ResourceRouting, routing)
        # Inserting the same (top_id, pod_id, resource_type) again must
        # violate the unique constraint.
        self.assertRaises(oslo_db.exception.DBDuplicateEntry,
                          core.create_resource,
                          self.context, models.ResourceRouting, routing)

    def tearDown(self):
        core.ModelBase.metadata.drop_all(core.get_engine())
| |
#! /usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from time import time
from os.path import join, abspath, dirname, normpath
from optparse import OptionParser
import json
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.build_api import build_mbed_libs
from workspace_tools.build_api import write_build_report
from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
from workspace_tools.test_api import SingleTestRunner
from workspace_tools.test_api import singletest_in_cli_mode
from workspace_tools.paths import TEST_DIR
from workspace_tools.tests import TEST_MAP
# Release matrix: one (target_name, supported_toolchains) tuple per platform.
# Target names must be keys of TARGET_MAP.
OFFICIAL_MBED_LIBRARY_BUILD = (
    ('LPC11U24', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
    ('LPC1768', ('ARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    ('UBLOX_C027', ('ARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    ('ARCH_PRO', ('ARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    ('LPC2368', ('ARM', 'GCC_ARM')),
    ('LPC2460', ('GCC_ARM',)),
    ('LPC812', ('uARM','IAR')),
    ('LPC824', ('uARM', 'GCC_ARM', 'IAR', 'GCC_CR')),
    ('SSCI824', ('uARM','GCC_ARM')),
    ('LPC1347', ('ARM','IAR')),
    ('LPC4088', ('ARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    ('LPC4088_DM', ('ARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    ('LPC1114', ('uARM','GCC_ARM', 'GCC_CR', 'IAR')),
    ('LPC11U35_401', ('ARM', 'uARM','GCC_ARM','GCC_CR', 'IAR')),
    ('LPC11U35_501', ('ARM', 'uARM','GCC_ARM','GCC_CR', 'IAR')),
    ('LPC1549', ('uARM','GCC_ARM','GCC_CR', 'IAR')),
    ('XADOW_M0', ('ARM', 'uARM','GCC_ARM','GCC_CR')),
    ('ARCH_GPRS', ('ARM', 'uARM', 'GCC_ARM', 'GCC_CR', 'IAR')),
    ('LPC4337', ('ARM',)),
    ('LPC11U37H_401', ('ARM', 'uARM','GCC_ARM','GCC_CR')),
    ('MICRONFCBOARD', ('ARM', 'uARM','GCC_ARM')),
    ('KL05Z', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
    ('KL25Z', ('ARM', 'GCC_ARM', 'IAR')),
    ('KL27Z', ('ARM', 'GCC_ARM', 'IAR')),
    ('KL43Z', ('ARM', 'GCC_ARM')),
    ('KL46Z', ('ARM', 'GCC_ARM', 'IAR')),
    ('K64F', ('ARM', 'GCC_ARM', 'IAR')),
    ('K22F', ('ARM', 'GCC_ARM', 'IAR')),
    ('K20D50M', ('ARM', 'GCC_ARM' , 'IAR')),
    ('TEENSY3_1', ('ARM', 'GCC_ARM')),
    ('B96B_F446VE', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F030R8', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F031K6', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F042K6', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F070RB', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F072RB', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F091RC', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F103RB', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F302R8', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F303K8', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F303RE', ('ARM', 'uARM', 'IAR')),
    ('NUCLEO_F334R8', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F401RE', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F410RB', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F411RE', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F446RE', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('ELMO_F411RE', ('ARM', 'uARM', 'GCC_ARM')),
    ('NUCLEO_L053R8', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_L152RE', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('MTS_MDOT_F405RG', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('MTS_MDOT_F411RE', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('MTS_DRAGONFLY_F411RE', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('DISCO_L053C8', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('DISCO_F334C8', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('DISCO_F429ZI', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('DISCO_F469NI', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('DISCO_F746NG', ('ARM', 'uARM', 'GCC_ARM','IAR')),
    ('DISCO_L476VG', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_L476RG', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('NUCLEO_F746ZG', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
    ('NUCLEO_L031K6', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
    ('NUCLEO_L073RZ', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
    ('MOTE_L152RC', ('ARM', 'uARM', 'IAR', 'GCC_ARM')),
    ('ARCH_MAX', ('ARM', 'GCC_ARM')),
    ('NRF51822', ('ARM', 'GCC_ARM', 'IAR')),
    ('NRF51_DK', ('ARM', 'GCC_ARM', 'IAR')),
    ('NRF51_DONGLE', ('ARM', 'GCC_ARM', 'IAR')),
    ('HRM1017', ('ARM', 'GCC_ARM', 'IAR')),
    ('ARCH_BLE', ('ARM', 'GCC_ARM', 'IAR')),
    ('SEEED_TINY_BLE', ('ARM', 'GCC_ARM', 'IAR')),
    ('RBLAB_NRF51822', ('ARM', 'GCC_ARM')),
    ('RBLAB_BLENANO', ('ARM', 'GCC_ARM')),
    ('WALLBOT_BLE', ('ARM', 'GCC_ARM')),
    ('DELTA_DFCM_NNN40', ('ARM', 'GCC_ARM')),
    ('NRF51_MICROBIT', ('ARM','GCC_ARM')),
    ('NRF51_MICROBIT_B', ('ARM',)),
    ('TY51822R3', ('ARM', 'GCC_ARM')),
    ('LPC11U68', ('ARM', 'uARM','GCC_ARM','GCC_CR', 'IAR')),
    ('OC_MBUINO', ('ARM', 'uARM', 'GCC_ARM', 'IAR')),
    ('ARM_MPS2_M0' , ('ARM',)),
    ('ARM_MPS2_M0P' , ('ARM',)),
    ('ARM_MPS2_M3' , ('ARM',)),
    ('ARM_MPS2_M4' , ('ARM',)),
    ('ARM_MPS2_M7' , ('ARM',)),
    ('ARM_MPS2_BEID' , ('ARM',)),
    ('RZ_A1H' , ('ARM', 'GCC_ARM', 'IAR')),
    ('EFM32ZG_STK3200', ('GCC_ARM', 'uARM')),
    ('EFM32HG_STK3400', ('GCC_ARM', 'uARM')),
    ('EFM32LG_STK3600', ('ARM', 'GCC_ARM', 'uARM')),
    ('EFM32GG_STK3700', ('ARM', 'GCC_ARM', 'uARM')),
    ('EFM32WG_STK3800', ('ARM', 'GCC_ARM', 'uARM')),
    ('EFM32PG_STK3401', ('ARM', 'GCC_ARM', 'uARM')),
    ('MAXWSNENV', ('ARM', 'GCC_ARM', 'IAR')),
    ('MAX32600MBED', ('ARM', 'GCC_ARM', 'IAR')),
    ('WIZWIKI_W7500', ('ARM', 'uARM')),
    ('WIZWIKI_W7500P',('ARM', 'uARM')),
    ('WIZWIKI_W7500ECO',('ARM', 'uARM')),
    ('SAMR21G18A',('ARM', 'uARM', 'GCC_ARM')),
    ('SAMD21J18A',('ARM', 'uARM', 'GCC_ARM')),
    ('SAMD21G18A',('ARM', 'uARM', 'GCC_ARM')),
)
if __name__ == '__main__':
    # Release-build driver (Python 2 script: print statements, basestring,
    # `except Exception, e`).  Either builds tests for the release targets or
    # builds the mbed libraries themselves, then writes/prints a report.
    parser = OptionParser()
    parser.add_option('-o', '--official', dest="official_only", default=False, action="store_true",
                      help="Build using only the official toolchain for each target")
    parser.add_option("-j", "--jobs", type="int", dest="jobs",
                      default=1, help="Number of concurrent jobs (default 1). Use 0 for auto based on host machine's number of CPUs")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Verbose diagnostic output")
    parser.add_option("-t", "--toolchains", dest="toolchains", help="Use toolchains names separated by comma")
    parser.add_option("-p", "--platforms", dest="platforms", default="", help="Build only for the platform namesseparated by comma")
    parser.add_option("-L", "--list-config", action="store_true", dest="list_config",
                      default=False, help="List the platforms and toolchains in the release in JSON")
    parser.add_option("", "--report-build", dest="report_build_file_name", help="Output the build results to an junit xml file")
    parser.add_option("", "--build-tests", dest="build_tests", help="Build all tests in the given directories (relative to /libraries/tests)")
    options, args = parser.parse_args()

    if options.list_config:
        print json.dumps(OFFICIAL_MBED_LIBRARY_BUILD, indent=4)
        sys.exit()

    start = time()
    build_report = {}
    build_properties = {}

    # Optional whitelist of platforms from -p/--platforms.
    platforms = None
    if options.platforms != "":
        platforms = set(options.platforms.split(","))

    if options.build_tests:
        # Get all paths
        directories = options.build_tests.split(',')
        for i in range(len(directories)):
            directories[i] = normpath(join(TEST_DIR, directories[i]))
        test_names = []
        for test_id in TEST_MAP.keys():
            # Prevents tests with multiple source dirs from being checked
            if isinstance( TEST_MAP[test_id].source_dir, basestring):
                test_path = normpath(TEST_MAP[test_id].source_dir)
                for directory in directories:
                    if directory in test_path:
                        test_names.append(test_id)
        mut_counter = 1
        mut = {}
        test_spec = {
            "targets": {}
        }
        # Build the per-target test specification and MUT (mbed-under-test)
        # table from the release matrix.
        for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
            toolchains = None
            if platforms is not None and not target_name in platforms:
                print("Excluding %s from release" % target_name)
                continue
            if options.official_only:
                toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
            else:
                toolchains = toolchain_list
            if options.toolchains:
                print "Only building using the following toolchains: %s" % (options.toolchains)
                toolchainSet = set(toolchains)
                toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
            mut[str(mut_counter)] = {
                "mcu": target_name
            }
            mut_counter += 1
            test_spec["targets"][target_name] = toolchains
        single_test = SingleTestRunner(_muts=mut,
                                       _opts_report_build_file_name=options.report_build_file_name,
                                       _test_spec=test_spec,
                                       _opts_test_by_names=",".join(test_names),
                                       _opts_verbose=options.verbose,
                                       _opts_only_build_tests=True,
                                       _opts_suppress_summary=True,
                                       _opts_jobs=options.jobs,
                                       _opts_include_non_automated=True,
                                       _opts_build_report=build_report,
                                       _opts_build_properties=build_properties)
        # Runs test suite in CLI mode
        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, new_build_report, new_build_properties = single_test.execute()
    else:
        # Library-only build: one build_mbed_libs call per target/toolchain.
        for target_name, toolchain_list in OFFICIAL_MBED_LIBRARY_BUILD:
            if platforms is not None and not target_name in platforms:
                print("Excluding %s from release" % target_name)
                continue
            if options.official_only:
                toolchains = (getattr(TARGET_MAP[target_name], 'default_toolchain', 'ARM'),)
            else:
                toolchains = toolchain_list
            if options.toolchains:
                print "Only building using the following toolchains: %s" % (options.toolchains)
                toolchainSet = set(toolchains)
                toolchains = toolchainSet.intersection(set((options.toolchains).split(',')))
            for toolchain in toolchains:
                id = "%s::%s" % (target_name, toolchain)
                try:
                    # NOTE(review): failures are printed and swallowed so one
                    # broken target does not abort the whole release build.
                    built_mbed_lib = build_mbed_libs(TARGET_MAP[target_name], toolchain, verbose=options.verbose, jobs=options.jobs, report=build_report, properties=build_properties)
                except Exception, e:
                    print str(e)

    # Write summary of the builds
    if options.report_build_file_name:
        file_report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
        file_report_exporter.report_to_file(build_report, options.report_build_file_name, test_suite_properties=build_properties)
    print "\n\nCompleted in: (%.2f)s" % (time() - start)
    print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
    status = print_report_exporter.report(build_report)
    if not status:
        sys.exit(1)
| |
#!/usr/bin/env python
# Libs
from dotenv import load_dotenv
from flask import Flask, jsonify, abort, request, make_response
from pymongo import MongoClient, ReturnDocument
from flask_httpauth import HTTPBasicAuth
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from string import hexdigits
from hashlib import md5
from functools import wraps
from datetime import datetime
import os
import re
# Env: configuration is loaded from a .env file sitting next to this module
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, ".env"))
# App and add-ons
app = Flask(__name__)
auth = HTTPBasicAuth()
# Global rate limits, keyed by the client's remote IP address
limiter = Limiter(
    app, key_func=get_remote_address, default_limits=["10000 per day", "100 per minute"]
)
# MongoDB: connection string, database and collection names come from the environment
client = MongoClient(os.environ.get("DATABASE_URL"), retryWrites=False)
db = client[os.environ.get("DATABASE_DB")]
collection = db[os.environ.get("DATABASE_COLLECTION")]
# Errors
@app.errorhandler(400)
def bad_request(error):
    """Uniform JSON body for HTTP 400 responses."""
    body = jsonify({"error": "Bad request"})
    return make_response(body, 400)
@app.errorhandler(401)
def unauthorized_access(error):
    """Uniform JSON body for HTTP 401 responses."""
    body = jsonify({"error": "Unauthorized access"})
    return make_response(body, 401)
@app.errorhandler(404)
def not_found(error):
    """Uniform JSON body for HTTP 404 responses."""
    body = jsonify({"error": "Not found"})
    return make_response(body, 404)
@app.errorhandler(413)
def payload_too_large(error):
    """Uniform JSON body for HTTP 413 responses."""
    body = jsonify({"error": "Payload too large"})
    return make_response(body, 413)
@app.errorhandler(429)
def too_many_requests(error):
    """Uniform JSON body for HTTP 429 responses (rate limiter)."""
    body = jsonify({"error": "Too many requests"})
    return make_response(body, 429)
# Helpers
@auth.get_password
def get_password(username):
    """Return the basic-auth password for *username*, or None if unknown."""
    expected_user = os.environ.get("DATABASE_USER")
    if username != expected_user:
        return None
    return os.environ.get("DATABASE_PASSWORD")
def validate_userid(f):
    """Decorator: 400 on a malformed user_id, 404 if the user is unknown.

    A valid id looks like an MD5 hex digest (32 hex chars).  As a side
    effect, the user's "last-Request" timestamp is bumped on every call.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        user_id = kwargs["user_id"]
        looks_like_md5 = len(user_id) == 32 and all(c in hexdigits for c in user_id)
        if not looks_like_md5:
            abort(400)
        touched = collection.find_one_and_update(
            {"hash": user_id}, {"$set": {"last-Request": datetime.utcnow()}}
        )
        if touched is None:
            abort(404)
        return f(*args, **kwargs)
    return wrapper
def validate_item(f):
    """Decorator: 404 when *item* is not a key in the user's data blob."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        document = collection.find_one({"hash": kwargs["user_id"]})
        if kwargs["item"] not in document["data"]:
            abort(404)
        return f(*args, **kwargs)
    return wrapper
def limit_content_length(max_length, accumulative=False):
    """Decorator factory: reject requests whose Content-Length exceeds a budget.

    With accumulative=True the budget is shared with the data the user has
    already stored, so the allowance shrinks as the document grows.  When the
    limit would be exceeded the request is aborted with 413.
    """
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            cl = request.content_length
            if accumulative is True:
                user_id = kwargs["user_id"]
                item = kwargs["item"]
                data = collection.find_one({"hash": user_id})["data"]
                # NOTE(review): len(data) counts dict *keys* while
                # len(data[item]) measures the stored value's length --
                # the units look inconsistent; confirm intended accounting.
                free_space = (
                    max_length - len(data) + len(data[item] if item in data else "")
                )
            else:
                free_space = max_length
            if cl is not None and cl > free_space:
                abort(413)
            return f(*args, **kwargs)
        return wrapper
    return decorator
def make_public_data(data, item="data"):
    """Project *data* down to the single key *item* for a JSON response."""
    value = data[item]
    return {item: value}
def make_public_user(user):
    """Copy *user* with the Mongo "_id" stringified so it is JSON-serializable."""
    return {
        field: (str(value) if field == "_id" else value)
        for field, value in user.items()
    }
# Admin
@app.route("/admin/", methods=["GET"])
@auth.login_required
def get_all():
    """Admin: dump every user document (ObjectIds stringified)."""
    users = [make_public_user(doc) for doc in collection.find({})]
    return jsonify({"collection": users})
@app.route("/admin/", methods=["DELETE"])
@auth.login_required
def delete_all():
    """Admin: blank every user's data blob; report how many documents changed."""
    outcome = collection.update_many({}, {"$set": {"data": {}}})
    return jsonify({"result": True, "deleted": outcome.modified_count})
@app.route("/admin/destroy", methods=["DELETE"])
@auth.login_required
def reset_all():
    """Admin: drop every user document entirely."""
    outcome = collection.delete_many({})
    return jsonify({"result": True, "deleted": outcome.deleted_count})
# Public
@app.route(
    "/", methods=["GET"], defaults={"user_id": "4bd19e518d90d816fb283cf09d6498bf"}
)
@app.route("/<user_id>", methods=["GET"])
@validate_userid
def get_data(user_id):
    """Return the user's whole data blob."""
    document = collection.find_one({"hash": user_id})
    return jsonify(make_public_data(document))
@app.route("/<user_id>/<item>", methods=["GET"])
@validate_userid
@validate_item
def get_item(user_id, item):
    """Return a single key from the user's data blob."""
    blob = collection.find_one({"hash": user_id})["data"]
    return jsonify(make_public_data(blob, item))
@app.route(
    "/",
    methods=["POST", "PUT"],
    defaults={"user_id": "4bd19e518d90d816fb283cf09d6498bf"},
)
@app.route("/<user_id>", methods=["POST", "PUT"])
@validate_userid
@limit_content_length(5 * 1024 * 1024)
def update_data(user_id):
    """Replace the user's entire data blob with the request JSON."""
    updated = collection.find_one_and_update(
        {"hash": user_id},
        {"$set": {"data": request.json}},
        return_document=ReturnDocument.AFTER,
    )
    created = request.method == "POST"
    return jsonify(make_public_data(updated)), (201 if created else 200)
@app.route("/<user_id>/<item>", methods=["POST", "PUT"])
@validate_userid
@limit_content_length(5 * 1024 * 1024, True)
def update_item(user_id, item):
    """Set one key of the user's data blob to the request JSON."""
    blob = collection.find_one_and_update(
        {"hash": user_id},
        {"$set": {"data." + item: request.json}},
        return_document=ReturnDocument.AFTER,
    )["data"]
    created = request.method == "POST"
    return jsonify(make_public_data(blob, item)), (201 if created else 200)
@app.route(
    "/", methods=["DELETE"], defaults={"user_id": "4bd19e518d90d816fb283cf09d6498bf"}
)
@app.route("/<user_id>", methods=["DELETE"])
@validate_userid
def delete_data(user_id):
    """Blank the user's data blob (the account itself survives)."""
    blank = {"$set": {"data": {}}}
    collection.find_one_and_update({"hash": user_id}, blank)
    return jsonify({"result": True})
@app.route("/<user_id>/<item>", methods=["DELETE"])
@validate_userid
@validate_item
def delete_item(user_id, item):
    """Remove a single key from the user's data blob."""
    removal = {"$unset": {"data." + item: ""}}
    collection.find_one_and_update({"hash": user_id}, removal)
    return jsonify({"result": True})
# Signup
@app.route("/signup", methods=["POST"])
@limit_content_length(1024)
def create_user():
    """Create (or idempotently re-acknowledge) a user keyed by the MD5 of
    their email address.

    Responds 201 with {"hash": <id>} in all cases; 400 on a malformed email.
    """
    email = request.form["email"]
    if not re.match(
        r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email
    ):
        abort(400)
    # MD5 is used only as a stable public identifier, not for secrecy.
    # Renamed from `hash`, which shadowed the builtin of the same name.
    user_hash = md5(email.encode("utf-8")).hexdigest()
    if not collection.find_one({"hash": user_hash}):
        user = {
            "email": email,
            "hash": user_hash,
            "data": {},
            "created-At": datetime.utcnow(),
        }
        collection.insert_one(user)
    return jsonify({"hash": user_hash}), 201, {"Access-Control-Allow-Origin": "*"}
# Run
if __name__ == "__main__":
    # Development entry point only: Flask's debug mode enables the
    # interactive debugger/reloader and must not be exposed in production.
    app.run(debug=True)
| |
from __future__ import absolute_import
from __future__ import print_function
import os
import shutil
import subprocess
import getpass
import pexpect
from insolater import version_tools as vt
class Insolater(object):
    """Manage named snapshots ("versions") of the working directory.

    An "original" snapshot is always kept; additional named versions can
    be created, opened, and deleted, and can be synchronised with a remote
    host via rsync.  All state lives under ``self.repo`` and is manipulated
    through the ``version_tools`` module (imported as ``vt``).
    """
    _CMD = "inso"
    _NOT_INIT_MESSAGE = "No session found. See '{cmd} init <remote changes>'".format(cmd=_CMD)
    _ALREADY_INIT_MESSAGE = (
        "Already initialized. To end session use: {cmd} exit [<remote backup>]".format(cmd=_CMD))

    def __init__(self, repo=".insolater_repo", timeout=5, filepattern="."):
        # Directory that stores all saved versions.
        self.repo = os.path.normpath(repo)
        # Seconds to wait for each remote file transfer before giving up.
        self.timeout = timeout
        # Whitespace-separated patterns handed to `git add`.
        self.filepattern = filepattern.split()

    #TODO: _get_repo_path()
    #: make an .inso_include file
    #:: inso pattern [--add|set|clear|all] none
    #: keep track of current version
    #:: don't allow edits to ORIG
    #::: either Update ORIG, Discard, Move to CHANGES, or Send to remote
    #TODO: apply (merge changes into ORIG)
    def init(self, remote_changes=None):
        """Create repo and store original files. Optionally retrieve version remotely."""
        self._verify_repo_exists(False)
        vt.init(self.repo)
        if remote_changes:
            self.pull_version(remote_changes)
        return "Initialized repository with versions: original"

    def current_version(self):
        """Return the name of the currently open version."""
        self._verify_repo_exists(True)
        return vt.current_version(self.repo)

    def all_versions(self):
        """Return every version known to the repository."""
        self._verify_repo_exists(True)
        return vt.all_versions(self.repo)

    def change_version(self, version):
        """Save changes and switch to the specified version."""
        self._verify_repo_exists(True)
        #TODO: save changes? when in original
        if vt.current_version(self.repo) == 'original':
            # Never overwrite the pristine snapshot; park the edits aside.
            vt.save_version(self.repo, '_original_edit')
        else:
            vt.save_version(self.repo)
        vt.open_version(self.repo, version)
        if vt.current_version(self.repo) == version:
            return "Switched to %s" % version
        else:
            return "Version not found: %s" % version

    def save_version(self, version='', overwrite=None):
        """Save/create and open a version with the specified name.
        Fails if version name starts with '_'.
        Prompts if version already exists unless overwrite is set."""
        self._verify_repo_exists(True)
        #TODO: better version name checking
        cv = vt.current_version(self.repo)
        version = version or cv
        if version[0] == '_':
            # Leading underscore is reserved for internal snapshots.
            return "Invalid version name: %s" % version
        if not vt.is_version(self.repo, version):
            vt.save_version(self.repo, version)
            vt.open_version(self.repo, version)
            return "Version %s created and opened" % version
        if overwrite is None:
            # NOTE: raw_input is the Python 2 builtin (input() on Python 3).
            discard = raw_input("Do you want to overwrite changes (y/[n]): ")
            # BUG FIX: answering 'y' must *enable* the overwrite; the
            # comparison was previously inverted ('!=') and aborted on 'y'.
            # (Matches the confirmation convention used by exit().)
            overwrite = discard.lower() == 'y'
        if overwrite:
            vt.save_version(self.repo, version)
            vt.open_version(self.repo, version)
            return "Version %s saved and opened" % version
        return "Aborted to avoid overwriting."

    def delete_version(self, version):
        """Delete the specified version.
        Fails if the version does not exist, is 'original', or is currently open."""
        self._verify_repo_exists(True)
        if not vt.is_version(self.repo, version):
            return "Version not found: %s" % version
        if vt.current_version(self.repo) == version:
            return "Cannot delete current version: %s" % version
        if version == 'original':
            return "Cannot delete original version"
        vt.delete_version(self.repo, version)
        return "Version %s deleted" % version

    def pull_version(self, remote_changes, version=''):
        """Pull remote changes into specified version.
        If no version is specified pull into current version."""
        self._verify_repo_exists(True)
        cv = vt.current_version(self.repo)
        if version:
            self.save_version(version)
            self.change_version(version)
        # Clear the working tree (except the repo itself) before syncing.
        for f in os.listdir('.'):
            if f != self.repo:
                if os.path.isdir(f):
                    shutil.rmtree(f)
                else:
                    os.remove(f)
        retv = self._run("rsync -Pravdtze ssh {0}/* .".format(os.path.normpath(remote_changes)))[0]
        if version != '' and version != cv:
            self.change_version(cv)
        if retv != 0:
            raise Exception("Failed to sync changes")
        return "Pulled updates"

    def push_version(self, remote_location, version=''):
        """Push specified version to remote location.
        If no version is specified push the current version."""
        self._verify_repo_exists(True)
        cv = vt.current_version(self.repo)
        if version and version != cv:
            if not vt.is_version(self.repo, version):
                return "Version not found: %s" % version
            self.change_version(version)
        pswd = getpass.getpass(remote_location.split(':')[0] + "'s password:")
        transfer_str = ""
        for f in os.listdir('.'):
            if f == self.repo:
                continue
            rsync = "rsync -R -Pravdtze ssh " + f + " " + remote_location
            exitstatus = 0
            try:
                exitstatus = self._run_with_password(rsync, pswd, self.timeout)[0]
            except pexpect.TIMEOUT:
                raise Exception("Aborted (File transfer timeouted out)")
            finally:
                # Always restore the version that was open before the push.
                self.change_version(cv)
            if exitstatus != 0:
                transfer_str += f + " \t\tFailed to transfer\n"
                raise Exception(transfer_str + "Aborted (File transfer failed).")
            transfer_str += f + " \t\ttransfered\n"
        return transfer_str

    def exit(self, version='original', discard_changes=None):
        """Open original or specified version files, and delete all other versions
        (delete repo)."""
        self._verify_repo_exists(True)
        if not vt.is_version(self.repo, version):
            return "Version not found: %s" % version
        if discard_changes is None:
            discard = raw_input("Do you want to discard all changes (y/[n]): ")
            discard_changes = discard.lower() == 'y'
        if not discard_changes:
            return "Aborted to avoid discarding changes."
        vt.open_version(self.repo, version)
        shutil.rmtree(self.repo)
        return "Session Ended"

    def _verify_repo_exists(self, exists):
        """(may change Python's current directory).
        Raise an exception if the repo does not have the specified state of being."""
        if exists:
            if not os.path.exists(self.repo):
                if os.getcwd() == '/':
                    raise Exception(Insolater._NOT_INIT_MESSAGE)
                else:
                    # Walk up the tree looking for an enclosing session.
                    os.chdir('..')
                    self._verify_repo_exists(exists)
        else:
            if os.path.exists(self.repo):
                raise Exception(Insolater._ALREADY_INIT_MESSAGE)

    def _run(self, command):
        """Replace instances of {repo} with self.repo and run the command."""
        command = command.format(repo=self.repo)
        proc = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        exit_code = proc.poll()
        return exit_code, out, err

    def _run_git(self, command):
        """Runs git --git-dir={repo} command."""
        return self._run("git --git-dir={repo} " + command)

    def _run_git_add(self):
        """Runs git --git-dir={repo} add -A for each filepattern."""
        sh = ""
        for fp in self.filepattern:
            # BUG FIX: the trailing '";".format(fp)' was a no-op (no
            # placeholder in ";"); plain concatenation is what is meant.
            sh += "git --git-dir={repo} add -A " + fp + ";"
        return self._run(sh)

    def _run_with_password(self, command, pswd, timeout=5):
        """Replace instances of {repo} with self.repo and run the command using the password."""
        proc = pexpect.spawn(command.format(repo=self.repo))
        proc.expect('password:')
        proc.sendline(pswd)
        proc.expect(pexpect.EOF, timeout=timeout)
        # NOTE(review): isalive() is returned as the "exit status" slot; it
        # is normally False (0) after EOF -- confirm callers expect this.
        return proc.isalive(), proc.read(), ''
| |
# -*- coding: utf-8 -*-
"""
Smirnov-Grubbs test for outlier detection.
"""
import numpy as np
from scipy import stats
from math import sqrt
from collections import defaultdict
try:
import pandas as pd
except ImportError:
pd = None
__all__ = ['test',
'two_sided_test',
'two_sided_test_indices',
'two_sided_test_outliers',
'min_test',
'min_test_indices',
'min_test_outliers',
'max_test',
'max_test_indices',
'max_test_outliers',
'TwoSidedGrubbsTest',
'MinValueGrubbsTest',
'MaxValueGrubbsTest',
'OutputType']
DEFAULT_ALPHA = 0.95
# Test output types
class OutputType:
    """Enumeration of the result modes accepted by ``GrubbsTest.run``."""
    DATA = 0      # the data set with outliers removed
    OUTLIERS = 1  # the outlier values themselves
    INDICES = 2   # positions of the outliers in the original data
class GrubbsTest(object):
    """Base class for the iterative Smirnov-Grubbs outlier test.

    Subclasses supply `_target` (how the outlier candidate is chosen) and
    `_get_t_significance_level` (one- vs two-sided significance level).
    """
    def __init__(self, data):
        # Accepted formats: list, numpy.ndarray or pandas.Series (see _copy_data).
        self.original_data = data

    def _copy_data(self):
        """Return the data as an indexable array-like.

        NOTE(review): despite the name, ndarray/Series inputs are returned
        as-is, not copied; only lists are converted.  `run()` never mutates
        the array in place (`_delete_item` builds new objects), so this is safe.
        """
        if isinstance(self.original_data, np.ndarray):
            return self.original_data
        elif pd is not None and isinstance(self.original_data, pd.Series):
            return self.original_data
        elif isinstance(self.original_data, list):
            return np.array(self.original_data)
        else:
            raise TypeError('Unsupported data format')

    def _delete_item(self, data, index):
        """Return a new array/Series with the element at *index* removed."""
        if pd is not None and isinstance(data, pd.Series):
            return data.drop(index)
        elif isinstance(data, np.ndarray):
            return np.delete(data, index)
        else:
            raise TypeError('Unsupported data format')

    def _get_indices(self, values):
        """Map outlier *values* back to positions in the original data,
        resolving duplicate values left-to-right."""
        last_seen = defaultdict(lambda: 0)
        data = list(self.original_data)
        indices = list()
        for value in values:
            start = last_seen[value]
            index = data.index(value, start)
            indices.append(index)
            last_seen[value] = index + 1
        return indices

    def _get_g_test(self, data, alpha):
        """Compute a significant value score following these steps, being alpha
        the requested significance level:
        1. Find the upper critical value of the t-distribution with n-2
           degrees of freedom and a significance level of alpha/2n
           (for two-sided tests) or alpha/n (for one-sided tests).
        2. Use this t value to find the score with the following formula:
           ((n-1) / sqrt(n)) * (sqrt(t**2 / (n-2 + t**2)))
        :param numpy.array data: data set
        :param float alpha: significance level
        :return: G_test score
        """
        n = len(data)
        significance_level = self._get_t_significance_level(alpha, n)
        t = stats.t.isf(significance_level, n-2)
        return ((n-1) / sqrt(n)) * (sqrt(t**2 / (n-2 + t**2)))

    def _test_once(self, data, alpha):
        """Perform one iteration of the Smirnov-Grubbs test.
        :param numpy.array data: data set
        :param float alpha: significance level
        :return: the index of the outlier if one is found; None otherwise
        """
        target_index, value = self._target(data)
        # NOTE(review): ndarray.std() defaults to ddof=0 while Series.std()
        # uses ddof=1, so list/array vs Series inputs score slightly
        # differently -- confirm which is intended.
        g = value / data.std()
        g_test = self._get_g_test(data, alpha)
        return target_index if g > g_test else None

    def run(self, alpha=DEFAULT_ALPHA, output_type=OutputType.DATA):
        """Run the Smirnov-Grubbs test to remove outliers in the given data set.
        :param float alpha: significance level
        :param int output_type: test output type (from OutputType class values)
        :return: depending on the value of output_type, the data set without
        outliers (DATA), the outliers themselves (OUTLIERS) or the indices of
        the outliers in the original data set (INDICES)
        """
        data = self._copy_data()
        outliers = list()
        # Repeatedly strip the most extreme value until none qualifies.
        while True:
            outlier_index = self._test_once(data, alpha)
            if outlier_index is None:
                break
            outlier = data[outlier_index]
            outliers.append(outlier)
            data = self._delete_item(data, outlier_index)
        return_value = data
        if output_type == OutputType.OUTLIERS:
            return_value = outliers
        elif output_type == OutputType.INDICES:
            return_value = self._get_indices(outliers)
        return return_value

    def _target(self, data):
        """Return (index, distance-from-mean) of the outlier candidate."""
        raise NotImplementedError

    def _get_t_significance_level(self, alpha, n):
        # Signature fixed to (alpha, n) so it matches every overriding
        # subclass and the call in _get_g_test (previously missing `n`).
        raise NotImplementedError
class TwoSidedGrubbsTest(GrubbsTest):
    def _target(self, data):
        """Compute the index of the value farthest from the sample mean and
        its distance.

        :param numpy.array data: data set
        :return int, float: index of the element and its distance to the mean
        """
        deviations = abs(data - data.mean())
        idx = deviations.argmax()
        return idx, deviations[idx]

    def _get_t_significance_level(self, alpha, n):
        # Two-sided test: split alpha across both tails and the n candidates.
        return alpha / (2 * n)
class OneSidedGrubbsTest(GrubbsTest):
    def _target(self, data):
        """Compute the index of the min/max value and its distance from the
        sample mean.

        :param numpy.array data: data set
        :return int, float: index of the extreme value and its distance to the mean
        """
        idx = self._get_index(data)
        distance = abs(data[idx] - data.mean())
        return idx, distance

    def _get_t_significance_level(self, alpha, n):
        # One-sided test: alpha spread only over the n candidates.
        return alpha / n
class MinValueGrubbsTest(OneSidedGrubbsTest):
    # One-sided variant that only ever flags the sample minimum.
    def _get_index(self, data):
        return data.argmin()
class MaxValueGrubbsTest(OneSidedGrubbsTest):
    # One-sided variant that only ever flags the sample maximum.
    def _get_index(self, data):
        return data.argmax()
# Convenience functions to run single Grubbs tests
def _test(test_class, data, alpha, output_type):
    """Instantiate *test_class* on *data* and run a single Grubbs test."""
    runner = test_class(data)
    return runner.run(alpha, output_type=output_type)
def _two_sided_test(data, alpha, output_type):
    # Two-sided variant: outliers may lie on either tail.
    return _test(TwoSidedGrubbsTest, data, alpha, output_type)
def _min_test(data, alpha, output_type):
    # One-sided variant targeting only the sample minimum.
    return _test(MinValueGrubbsTest, data, alpha, output_type)
def _max_test(data, alpha, output_type):
    # One-sided variant targeting only the sample maximum.
    return _test(MaxValueGrubbsTest, data, alpha, output_type)
def two_sided_test(data, alpha=DEFAULT_ALPHA):
    """Return *data* with two-sided Grubbs outliers removed."""
    return _two_sided_test(data, alpha, OutputType.DATA)
def two_sided_test_indices(data, alpha=DEFAULT_ALPHA):
    """Return the indices of two-sided Grubbs outliers in *data*."""
    return _two_sided_test(data, alpha, OutputType.INDICES)
def two_sided_test_outliers(data, alpha=DEFAULT_ALPHA):
    """Return the two-sided Grubbs outlier values found in *data*."""
    return _two_sided_test(data, alpha, OutputType.OUTLIERS)
def min_test(data, alpha=DEFAULT_ALPHA):
    """Return *data* with minimum-side Grubbs outliers removed."""
    return _min_test(data, alpha, OutputType.DATA)
def min_test_indices(data, alpha=DEFAULT_ALPHA):
    """Return the indices of minimum-side Grubbs outliers in *data*."""
    return _min_test(data, alpha, OutputType.INDICES)
def min_test_outliers(data, alpha=DEFAULT_ALPHA):
    """Return the minimum-side Grubbs outlier values found in *data*."""
    return _min_test(data, alpha, OutputType.OUTLIERS)
def max_test(data, alpha=DEFAULT_ALPHA):
    """Return *data* with maximum-side Grubbs outliers removed."""
    return _max_test(data, alpha, OutputType.DATA)
def max_test_indices(data, alpha=DEFAULT_ALPHA):
    """Return the indices of maximum-side Grubbs outliers in *data*."""
    return _max_test(data, alpha, OutputType.INDICES)
def max_test_outliers(data, alpha=DEFAULT_ALPHA):
    """Return the maximum-side Grubbs outlier values found in *data*."""
    return _max_test(data, alpha, OutputType.OUTLIERS)
def test(data, alpha=DEFAULT_ALPHA):
    """Default entry point: the two-sided test returning cleaned data."""
    return two_sided_test(data, alpha)
| |
"""
Simple table class.
Note that this module depends only on the Python standard library.
You can "install" it just by dropping it into your working directory.
A SimpleTable is inherently (but not rigidly) rectangular.
You should create it from a *rectangular* (2d!) iterable of data.
A SimpleTable can be concatenated with another SimpleTable
or extended by another SimpleTable. ::
table1.extend_right(table2)
table1.extend(table2)
Note that although a SimpleTable allows only one column (the first) of
stubs at initialization, concatenation of tables allows you to produce
tables with interior stubs. (You can also assign the datatype 'stub'
to the cells in any column, or use ``insert_stubs``.)
A SimpleTable can be initialized with `datatypes`: a list of ints that
provide indexes into `data_fmts` and `data_aligns`. Each data cell is
assigned a datatype, which will control formatting. If you do not
specify the `datatypes` list, it will be set to ``range(ncols)`` where
`ncols` is the number of columns in the data. (I.e., cells in a
column have their own datatype.) This means that you can just specify
`data_fmts` without bothering to provide a `datatypes` list.  If
``len(datatypes)<ncols`` then datatype assignment will cycle across a
row. E.g., if you provide 10 rows of data with ``datatypes=[0,1]``
then you will have 5 columns of datatype 0 and 5 columns of datatype
1, alternating. Corresponding to this specification, you should provide
a list of two ``data_fmts`` and a list of two ``data_aligns``.
Potential problems for Python 3
-------------------------------
- Calls ``next`` instead of ``__next__``.
The 2to3 tool should handle that no problem.
(We will switch to the `next` function if 2.5 support is ever dropped.)
- from __future__ import division, with_statement
- from itertools import izip as zip
- Let me know if you find other problems.
:contact: alan dot isaac at gmail dot com
:requires: Python 2.5.1+
:note: current version
:note: HTML data format currently specifies tags
:todo: support a bit more of http://www.oasis-open.org/specs/tr9503.html
:todo: add colspan support to Cell
:since: 2008-12-21
:change: 2010-05-02 eliminate newlines that came before and after table
"""
from __future__ import division, with_statement
try: #accommodate Python 3
from itertools import izip as zip
except ImportError:
pass
from itertools import cycle
from collections import defaultdict
import csv
def csv2st(csvfile, headers=False, stubs=False, title=None):
    """Return SimpleTable instance,
    created from the data in `csvfile`,
    which is in comma separated values format.
    The first row may contain headers: set headers=True.
    The first column may contain stubs: set stubs=True.
    Can also supply headers and stubs as tuples of strings.
    `title`, if given, becomes the table title.
    """
    rows = list()
    with open(csvfile,'r') as fh:
        reader = csv.reader(fh)
        if headers is True:
            # Python 2 iterator protocol (see module notes above).
            headers = reader.next()
        elif headers is False:
            headers=()
        if stubs is True:
            stubs = list()
            for row in reader:
                if row:
                    # first cell is the stub, the rest is data
                    stubs.append(row[0])
                    rows.append(row[1:])
        else: #no stubs, or stubs provided
            for row in reader:
                if row:
                    rows.append(row)
        if stubs is False:
            stubs = ()
    ncols = len(rows[0])
    if any(len(row)!=ncols for row in rows):
        raise IOError('All rows of CSV file must have same length.')
    # BUG FIX: `title` was accepted but silently dropped; forward it
    # (falling back to SimpleTable's '' default when None).
    return SimpleTable(data=rows, headers=headers, stubs=stubs, title=title or '')
class SimpleTable(list):
    """Produce a simple ASCII, CSV, HTML, or LaTeX table from a
    *rectangular* (2d!) array of data, not necessarily numerical.
    Directly supports at most one header row,
    which should be the length of data[0].
    Directly supports at most one stubs column,
    which must be the length of data.
    (But see `insert_stubs` method.)
    See globals `default_txt_fmt`, `default_csv_fmt`, `default_html_fmt`,
    and `default_latex_fmt` for formatting options.
    Sample uses::
        mydata = [[11,12],[21,22]] # data MUST be 2-dimensional
        myheaders = [ "Column 1", "Column 2" ]
        mystubs = [ "Row 1", "Row 2" ]
        tbl = sm.iolib.SimpleTable(mydata, myheaders, mystubs, title="Title")
        print( tbl )
        print( tbl.as_html() )
        # set column specific data formatting
        tbl = sm.iolib.SimpleTable(mydata, myheaders, mystubs,
            fmt={'data_fmts':["%3.2f","%d"]})
        print( tbl.as_csv() )
        with open('./temp.tex','w') as fh:
            fh.write( tbl.as_latex_tabular() )
    """
    def __init__(self, data, headers=None, stubs=None, title='',
                 datatypes=None,
                 csv_fmt=None, txt_fmt=None, ltx_fmt=None, html_fmt=None,
                 celltype=None, rowtype=None,
                 **fmt_dict):
        """
        Parameters
        ----------
        data : list of lists or 2d array (not matrix!)
            R rows by K columns of table elements
        headers : list (or tuple) of str
            sequence of K strings, one per header
        stubs : list (or tuple) of str
            sequence of R strings, one per stub
        title : string
            title of the table
        datatypes : list of int
            indexes to `data_fmts`
        txt_fmt : dict
            text formatting options
        ltx_fmt : dict
            latex formatting options
        csv_fmt : dict
            csv formatting options
        html_fmt : dict
            html formatting options
        celltype : class
            the cell class for the table (default: Cell)
        rowtype : class
            the row class for the table (default: Row)
        fmt_dict : dict
            general formatting options
        """
        #self._raw_data = data
        self.title = title
        self._datatypes = datatypes or range(len(data[0]))
        #start with default formatting
        self._text_fmt = default_txt_fmt.copy()
        self._latex_fmt = default_latex_fmt.copy()
        self._csv_fmt = default_csv_fmt.copy()
        self._html_fmt = default_html_fmt.copy()
        #substitute any general user specified formatting
        #:note: these will be overridden by output specific arguments
        self._csv_fmt.update(fmt_dict)
        self._text_fmt.update(fmt_dict)
        self._latex_fmt.update(fmt_dict)
        self._html_fmt.update(fmt_dict)
        #substitute any output-type specific formatting
        self._csv_fmt.update(csv_fmt or dict())
        self._text_fmt.update(txt_fmt or dict())
        self._latex_fmt.update(ltx_fmt or dict())
        self._html_fmt.update(html_fmt or dict())
        self.output_formats = dict(
            text=self._text_fmt,
            txt=self._text_fmt,
            csv=self._csv_fmt,
            htm=self._html_fmt,
            html=self._html_fmt,
            latex=self._latex_fmt,
            ltx=self._latex_fmt)
        self._Cell = celltype or Cell
        self._Row = rowtype or Row
        rows = self._data2rows(data)  # a list of Row instances
        list.__init__(self, rows)
        self._add_headers_stubs(headers, stubs)
    def __str__(self):
        return self.as_text()
    def _add_headers_stubs(self, headers, stubs):
        """Return None. Adds headers and stubs to table,
        if these were provided at initialization.
        Parameters
        ----------
        headers : list of strings
            K strings, where K is number of columns
        stubs : list of strings
            R strings, where R is number of non-header rows
        :note: a header row does not receive a stub!
        """
        _Cell = self._Cell
        _Row = self._Row
        if headers:
            headers = [ _Cell(h, datatype='header') for h in headers ]
            headers = _Row(headers, datatype='header')
            headers.table = self
            for cell in headers:
                cell.row = headers
            self.insert(0, headers)
        if stubs:
            self.insert_stubs(0, stubs)
    def _data2rows(self, raw_data):
        """Return list of Row,
        the raw data as rows of cells.
        """
        _Cell = self._Cell
        _Row = self._Row
        rows = []
        for datarow in raw_data:
            # datatypes cycle across the row when fewer than ncols are given
            dtypes = cycle(self._datatypes)
            newrow = _Row([_Cell(datum) for datum in datarow])
            newrow.table = self  #row knows its SimpleTable
            for cell in newrow:
                cell.datatype = dtypes.next()  # Python 2 iterator protocol
                cell.row = newrow  #a cell knows its row
            rows.append(newrow)
        return rows
    def pad(self, s, width, align):
        """DEPRECATED: just use the pad function"""
        return pad(s, width, align)
    def get_colwidths(self, output_format, **fmt_dict):
        """Return the column widths to use for `output_format`."""
        fmt = self.output_formats[output_format].copy()
        fmt.update(fmt_dict)
        ncols = max(len(row) for row in self)
        request = fmt.get('colwidths')
        # BUG FIX: `request is 0` relied on CPython's small-int caching;
        # compare by value instead, guarding against bool (an int subclass)
        # so a False value keeps its original fall-through behavior.
        if type(request) is int and request == 0:  #no extra space desired (e.g., CSV)
            return [0] * ncols
        elif request is None:  #natural widths, no extra padding requested
            request = [0] * ncols
        elif isinstance(request, int):
            request = [request] * ncols
        elif len(request) < ncols:
            request = [request[i%len(request)] for i in range(ncols)]
        min_widths = []
        for col in zip(*self):
            maxwidth = max(len(c.format(0,output_format,**fmt)) for c in col)
            min_widths.append(maxwidth)
        result = map(max, min_widths, request)
        return result
    def _get_fmt(self, output_format, **fmt_dict):
        """Return dict, the formatting options.
        """
        #first get the default formatting
        try:
            fmt = self.output_formats[output_format].copy()
        except KeyError:
            raise ValueError('Unknown format: %s' % output_format)
        #then, add formatting specific to this call
        fmt.update(fmt_dict)
        return fmt
    def as_csv(self, **fmt_dict):
        """Return string, the table in CSV format.
        Currently only supports comma separator."""
        #fetch the format, which may just be default_csv_format
        fmt = self._get_fmt('csv', **fmt_dict)
        return self.as_text(**fmt)
    def as_text(self, **fmt_dict):
        """Return string, the table as text."""
        #fetch the text format, override with fmt_dict
        fmt = self._get_fmt('txt', **fmt_dict)
        #get rows formatted as strings
        formatted_rows = [ row.as_string('text', **fmt) for row in self ]
        rowlen = len(formatted_rows[-1])  #don't use header row
        #place decoration above the table body, if desired
        table_dec_above = fmt.get('table_dec_above','=')
        if table_dec_above:
            formatted_rows.insert(0, table_dec_above * rowlen)
        #next place a title at the very top, if desired
        #:note: user can include a newlines at end of title if desired
        title = self.title
        if title:
            title = pad(self.title, rowlen, fmt.get('title_align','c'))
            formatted_rows.insert(0, title)
        #add decoration below the table, if desired
        table_dec_below = fmt.get('table_dec_below','-')
        if table_dec_below:
            formatted_rows.append(table_dec_below * rowlen)
        return '\n'.join(formatted_rows)
    def as_html(self, **fmt_dict):
        """Return string.
        This is the default formatter for HTML tables.
        An HTML table formatter must accept as arguments
        a table and a format dictionary.
        """
        #fetch the text format, override with fmt_dict
        fmt = self._get_fmt('html', **fmt_dict)
        formatted_rows = ['<table class="simpletable">']
        if self.title:
            title = '<caption>%s</caption>' % self.title
            formatted_rows.append(title)
        formatted_rows.extend( row.as_string('html', **fmt) for row in self )
        formatted_rows.append('</table>')
        return '\n'.join(formatted_rows)
    def as_latex_tabular(self, **fmt_dict):
        '''Return string, the table as a LaTeX tabular environment.
        Note: will require the booktabs package.'''
        #fetch the text format, override with fmt_dict
        fmt = self._get_fmt('latex', **fmt_dict)
        aligns = self[-1].get_aligns('latex', **fmt)
        formatted_rows = [ r'\begin{tabular}{%s}' % aligns ]
        table_dec_above = fmt['table_dec_above']
        if table_dec_above:
            formatted_rows.append(table_dec_above)
        formatted_rows.extend(
            row.as_string(output_format='latex', **fmt) for row in self )
        table_dec_below = fmt['table_dec_below']
        if table_dec_below:
            formatted_rows.append(table_dec_below)
        formatted_rows.append(r'\end{tabular}')
        #tabular does not support caption, but make it available for figure environment
        if self.title:
            title = r'%%\caption{%s}' % self.title
            formatted_rows.append(title)
        return '\n'.join(formatted_rows)
    # NOTE(review): inert draft kept from the original source; this bare
    # string literal is evaluated and discarded at class-creation time.
    """
    if fmt_dict['strip_backslash']:
        ltx_stubs = [stub.replace('\\',r'$\backslash$') for stub in self.stubs]
        ltx_headers = [header.replace('\\',r'$\backslash$') for header in self.headers]
        ltx_headers = self.format_headers(fmt_dict, ltx_headers)
    else:
        ltx_headers = self.format_headers(fmt_dict)
        ltx_stubs = self.format_stubs(fmt_dict, ltx_stubs)
    """
    def extend_right(self, table):
        """Return None.
        Extend each row of `self` with corresponding row of `table`.
        Does **not** import formatting from ``table``.
        This generally makes sense only if the two tables have
        the same number of rows, but that is not enforced.
        :note: To extend append a table below, just use `extend`,
        which is the ordinary list method.  This generally makes sense
        only if the two tables have the same number of columns,
        but that is not enforced.
        """
        for row1, row2 in zip(self, table):
            row1.extend(row2)
    def insert_stubs(self, loc, stubs):
        """Return None. Insert column of stubs at column `loc`.
        If there is a header row, it gets an empty cell.
        So ``len(stubs)`` should equal the number of non-header rows.
        """
        _Cell = self._Cell
        stubs = iter(stubs)
        for row in self:
            if row.datatype == 'header':
                empty_cell = _Cell('', datatype='empty')
                row.insert(loc, empty_cell)
            else:
                row.insert_stub(loc, stubs.next())  # Python 2 iterator protocol
    @property
    def data(self):
        # 2d list of the raw cell data (headers/stubs included as rows/cells)
        return [row.data for row in self]
#END: class SimpleTable
def pad(s, width, align):
    """Return *s* space-padded to *width*: 'l' left-, 'r' right-justified,
    anything else centered."""
    if align == 'l':
        return s.ljust(width)
    if align == 'r':
        return s.rjust(width)
    return s.center(width)
class Row(list):
    """A Row is a list of cells;
    a row can belong to a SimpleTable.
    """
    def __init__(self, cells, datatype='', table=None, celltype=None, **fmt_dict):
        """
        Parameters
        ----------
        cells : iterable
            The cells (or raw values) making up the row.
        datatype : str
            'data' or 'header' (default '': treated as data).
        table : SimpleTable
            The table this row belongs to, if any.
        celltype : class, optional
            Cell class used to wrap stubs; defaults to the table's
            `_Cell`, falling back to `Cell`.
        **fmt_dict
            Row-specific formatting options.
        """
        list.__init__(self, cells)
        self.datatype = datatype  # data or header
        self.table = table
        if celltype is None:
            try:
                celltype = table._Cell
            except AttributeError:
                celltype = Cell
        self._Cell = celltype
        self._fmt = fmt_dict
    def insert_stub(self, loc, stub):
        """Return None. Inserts a stub cell
        in the row at `loc`.
        """
        _Cell = self._Cell
        if not isinstance(stub, _Cell):
            # wrap raw values in a stub cell
            # (a no-op ``stub = stub`` assignment was removed here)
            stub = _Cell(stub, datatype='stub', row=self)
        self.insert(loc, stub)
    def _get_fmt(self, output_format, **fmt_dict):
        """Return dict, the formatting options.
        Layering order: format defaults, then table-level options,
        then this row's options, then per-call overrides.
        """
        # first get the default formatting
        try:
            fmt = default_fmts[output_format].copy()
        except KeyError:
            raise ValueError('Unknown format: %s' % output_format)
        # second get table specific formatting (if possible)
        try:
            fmt.update(self.table.output_formats[output_format])
        except AttributeError:
            # no table, or table without output_formats
            pass
        # finally, add formatting for this row and this call
        fmt.update(self._fmt)
        fmt.update(fmt_dict)
        return fmt
    def get_aligns(self, output_format, **fmt_dict):
        """Return string, sequence of column alignments.
        Ensure conformable data_aligns in `fmt_dict`."""
        fmt = self._get_fmt(output_format, **fmt_dict)
        return ''.join(cell.alignment(output_format, **fmt) for cell in self)
    def as_string(self, output_format='txt', **fmt_dict):
        """Return string: the formatted row.
        This is the default formatter for rows.
        Override this to get different formatting.
        A row formatter must accept as arguments
        a row (self) and an output format,
        one of ('html', 'txt', 'csv', 'latex').
        """
        fmt = self._get_fmt(output_format, **fmt_dict)
        # get column widths: prefer the table's computed widths,
        # fall back to the format options, then to zero widths
        try:
            colwidths = self.table.get_colwidths(output_format, **fmt)
        except AttributeError:
            colwidths = fmt.get('colwidths')
        if colwidths is None:
            colwidths = (0,) * len(self)
        colsep = fmt['colsep']
        row_pre = fmt.get('row_pre', '')
        row_post = fmt.get('row_post', '')
        formatted_cells = [cell.format(width, output_format=output_format, **fmt)
                           for cell, width in zip(self, colwidths)]
        header_dec_below = fmt.get('header_dec_below')
        formatted_row = row_pre + colsep.join(formatted_cells) + row_post
        if self.datatype == 'header' and header_dec_below:
            formatted_row = self.decorate_header(formatted_row, output_format, header_dec_below)
        return formatted_row
    def decorate_header(self, header_as_string, output_format, header_dec_below):
        """Return str: the header with a decoration line appended.
        This really only makes sense for the text and latex output formats."""
        if output_format in ('text', 'txt'):
            # repeat the decoration character to the header's width
            row0len = len(header_as_string)
            result = header_as_string + "\n" + (header_dec_below * row0len)
        elif output_format == 'latex':
            result = header_as_string + "\n" + header_dec_below
        else:
            raise ValueError("I can't decorate a %s header."%output_format)
        return result
    @property
    def data(self):
        """Return list: the ``data`` payload of every cell in the row."""
        return [cell.data for cell in self]
#END class Row
class Cell(object):
    """A single table cell: wraps one datum together with its datatype
    and any cell-specific formatting options."""
    def __init__(self, data='', datatype=0, row=None, **fmt_dict):
        self.row = row
        self.data = data
        self.datatype = datatype
        self._fmt = fmt_dict
    def __str__(self):
        return '%s' % self.data
    def _get_fmt(self, output_format, **fmt_dict):
        """Return dict, the formatting options.
        Layering order: format defaults, then table options, then row
        options, then this cell's options, then per-call overrides.
        """
        try:
            opts = default_fmts[output_format].copy()
        except KeyError:
            raise ValueError('Unknown format: %s' % output_format)
        # table-level options, when this cell sits in a table
        try:
            opts.update(self.row.table.output_formats[output_format])
        except AttributeError:
            pass
        # row-level options, when this cell sits in a row
        try:
            opts.update(self.row._fmt)
        except AttributeError:
            pass
        # cell-level options, then per-call overrides
        opts.update(self._fmt)
        opts.update(fmt_dict)
        return opts
    def alignment(self, output_format, **fmt_dict):
        """Return str: the one-character alignment code for this cell."""
        opts = self._get_fmt(output_format, **fmt_dict)
        dtype = self.datatype
        if isinstance(dtype, int):
            data_aligns = opts.get('data_aligns', 'c')
            return data_aligns[dtype % len(data_aligns)]
        if dtype == 'header':
            return opts.get('header_align', 'c')
        if dtype == 'stub':
            return opts.get('stubs_align', 'c')
        if dtype == 'empty':
            return 'c'
        raise ValueError('Unknown cell datatype: %s' % dtype)
    def format(self, width, output_format='txt', **fmt_dict):
        """Return str: the datum formatted and padded to `width`.
        This is the default formatter for cells; override it for
        different formatting.  A cell formatter must accept a cell
        (self) and an output format, one of
        ('html', 'txt', 'csv', 'latex'), and it generally responds to
        the datatype, one of (int, 'header', 'stub').
        """
        opts = self._get_fmt(output_format, **fmt_dict)
        datum = self.data
        dtype = self.datatype
        data_fmts = opts.get('data_fmts')
        if data_fmts is None:
            # allow for the deprecated scalar `data_fmt` option
            single_fmt = opts.get('data_fmt')
            if single_fmt is None:
                single_fmt = '%s'
            data_fmts = [single_fmt]
        if isinstance(dtype, int):
            # constrain the index to the available formats
            content = data_fmts[dtype % len(data_fmts)] % datum
        elif dtype == 'header':
            content = opts.get('header_fmt', '%s') % datum
        elif dtype == 'stub':
            content = opts.get('stub_fmt', '%s') % datum
        elif dtype == 'empty':
            content = opts.get('empty_cell', '')
        else:
            raise ValueError('Unknown cell datatype: %s' % dtype)
        align = self.alignment(output_format, **opts)
        return pad(content, width, align)
#END class Cell
######### begin: default formats for SimpleTable ##############
""" Some formatting suggestions:
- if you want rows to have no extra spacing,
set colwidths=0 and colsep=''.
(Naturally the columns will not align.)
- if you want rows to have minimal extra spacing,
set colwidths=1. The columns will align.
- to get consistent formatting, you should leave
all field width handling to SimpleTable:
use 0 as the field width in data_fmts. E.g., ::
data_fmts = ["%#0.6g","%#0.6g","%#0.4g","%#0.4g"],
colwidths = 14,
data_aligns = "r",
"""
default_csv_fmt = dict(
data_fmts = ['%s'],
data_fmt = '%s', #deprecated; use data_fmts
empty_cell = '',
colwidths = None,
colsep = ',',
row_pre = '',
row_post = '',
table_dec_above = '',
table_dec_below = '',
header_dec_below = '',
header_fmt = '"%s"',
stub_fmt = '"%s"',
title_align = '',
header_align = 'c',
data_aligns = "l",
stubs_align = "l",
fmt = 'csv',
)
default_html_fmt = dict(
data_fmts = ['<td>%s</td>'],
data_fmt = "<td>%s</td>", #deprecated; use data_fmts
empty_cell = '<td></td>',
colwidths = None,
colsep=' ',
row_pre = '<tr>\n ',
row_post = '\n</tr>',
table_dec_above=None,
table_dec_below=None,
header_dec_below=None,
header_fmt = '<th>%s</th>',
stub_fmt = '<th>%s</th>',
title_align='c',
header_align = 'c',
data_aligns = "c",
stubs_align = "l",
fmt = 'html',
)
default_txt_fmt = dict(
data_fmts = ["%s"],
data_fmt = "%s", #deprecated; use data_fmts
empty_cell = '',
colwidths = None,
colsep=' ',
row_pre = '',
row_post = '',
table_dec_above='=',
table_dec_below='-',
header_dec_below='-',
header_fmt = '%s',
stub_fmt = '%s',
title_align='c',
header_align = 'c',
data_aligns = "c",
stubs_align = "l",
fmt = 'txt',
)
default_latex_fmt = dict(
data_fmts = ['%s'],
data_fmt = '%s', #deprecated; use data_fmts
empty_cell = '',
colwidths = None,
colsep=' & ',
table_dec_above = r'\toprule',
table_dec_below = r'\bottomrule',
header_dec_below = r'\midrule',
strip_backslash = True,
header_fmt = r'\textbf{%s}',
stub_fmt =r'\textbf{%s}',
header_align = 'c',
data_aligns = 'c',
stubs_align = 'l',
fmt = 'ltx',
row_post = r' \\'
)
default_fmts = dict(
html= default_html_fmt,
htm= default_html_fmt,
txt=default_txt_fmt,
text=default_txt_fmt,
latex=default_latex_fmt,
ltx=default_latex_fmt,
csv=default_csv_fmt
)
######### end: default formats ##############
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Functions for loading data, setting options, etc.
"""
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from numpy import issubdtype, append, array
from astropy.extern.six import string_types
class BitMask(object):
    """Base class for specific bitmask objects.
    Attributes
    ----------
    bits : :func:`tuple`
        The individual bits, as (name, description) pairs; the tuple
        index is the bit number.
    """
    # The trailing comma is essential: without it the parentheses
    # collapse and `bits` becomes a single 2-tuple of strings, so
    # `bits[i][0]` would index *characters* of 'UNUSED_00' instead of
    # returning the flag name (that was the previous, buggy behavior).
    bits = (('UNUSED_00', "Unused"),)
    def flagname(self, value):
        """Convert a flag value into readable names.
        Parameters
        ----------
        value : :class:`int`
            Mask value.
        Returns
        -------
        :func:`tuple`
            The names of the flags.
        """
        names = [self.bits[bit][0] for bit in range(len(self.bits))
                 if (value & (1 << bit)) != 0]
        return tuple(names)
    def flagval(self, name):
        """Convert a name or set of names into a bitmask value.
        Parameters
        ----------
        name : :class:`str` or iterable.
            Name(s) of flags to convert.
        Returns
        -------
        :class:`int`
            The value of the mask
        """
        if isinstance(name, string_types):
            # a bare string means a single flag name
            name = [name]
        names = set(name)
        value = 0
        for bit in range(len(self.bits)):
            if self.bits[bit][0] in names:
                value += (1 << bit)
        return value
class APOGEE_STARFLAG(BitMask):
    """APOGEE star-level mask bits.
    Attributes
    ----------
    bits : :func:`tuple`
        The individual bits.
    """
    # Each entry is a (NAME, description) pair; the tuple index is the
    # bit number, so the UNUSED_* placeholders keep later flags at
    # their reserved positions (bits 0-17).  Descriptions ending in
    # "BAD" mark data-quality failures; "WARN" marks cautions.
    bits = (('BAD_PIXELS', "Spectrum has many bad pixels (>40%): BAD"),
            ('COMMISSIONING', ("Commissioning data (MJD<55761), " +
                               "non-standard configuration, poor " +
                               "LSF: WARN")),
            ('BRIGHT_NEIGHBOR', "Star has neighbor more than 10 " +
             "times brighter: WARN"),
            ('VERY_BRIGHT_NEIGHBOR', "Star has neighbor more than " +
             "100 times brighter: BAD"),
            ('LOW_SNR', "Spectrum has low S/N (S/N<5): BAD"),
            ('UNUSED_05', "Unused"),
            ('UNUSED_06', "Unused"),
            ('UNUSED_07', "Unused"),
            ('UNUSED_08', "Unused"),
            ('PERSIST_HIGH', "Spectrum has significant number " +
             "(>20%) of pixels in high persistence " +
             "region: WARN"),
            ('PERSIST_MED', "Spectrum has significant number " +
             "(>20%) of pixels in medium persistence " +
             "region: WARN"),
            ('PERSIST_LOW', "Spectrum has significant number " +
             "(>20%) of pixels in low persistence " +
             "region: WARN"),
            ('PERSIST_JUMP_POS', "Spectrum show obvious positive " +
             "jump in blue chip: WARN"),
            ('PERSIST_JUMP_NEG', "Spectrum show obvious negative " +
             "jump in blue chip: WARN"),
            ('UNUSED_14', "Unused"),
            ('UNUSED_15', "Unused"),
            ('SUSPECT_RV_COMBINATION', "WARNING: RVs from synthetic " +
             "template differ significantly from " +
             "those from combined template"),
            ('SUSPECT_BROAD_LINES', "WARNING: cross-correlation peak with " +
             "template significantly broader than " +
             "autocorrelation of template"),)
def rv_options(description="RV", set_args=None):
    """Set the options typically used by rv.
    Parameters
    ----------
    description : :class:`str`, optional
        The overall description of the command-line program.
    set_args : :class:`list`, optional
        A list that will be passed to
        :meth:`argparse.ArgumentParser.parse_args`; when ``None``, the
        real command line is parsed instead.
    Returns
    -------
    :class:`~argparse.Namespace`
        Parsed command-line options.
    """
    from os import getenv
    from os.path import join
    from argparse import ArgumentParser
    arg_parser = ArgumentParser(description=description, prog='rvMain')
    arg_parser.add_argument('-c', '--clobber', action='store_true', dest='clobber',
                            help='Overwrite any cache file(s).')
    arg_parser.add_argument('-D', '--data-dir', action='store', dest='plotDir',
                            default=join(getenv('HOME'), 'Desktop', 'apogee-rv'),
                            metavar='DIR', help='Read data from DIR.')
    arg_parser.add_argument('-I', '--no-index', action='store_false', dest='index',
                            help='Do not regenerate index file.')
    arg_parser.add_argument('-m', '--method', action='store', dest='method',
                            default='L-BFGS-B', metavar='METHOD',
                            help=('Set the optimization method for ' +
                                  'scipy.optimize.minimize (default "L-BFGS-B").'))
    arg_parser.add_argument('-p', '--plot', action='store_true', dest='plot',
                            help='Produce plots.')
    arg_parser.add_argument('-Q', '--q-value', action='store', type=float,
                            dest='Q', default=0, metavar='Q', help='Set Q value.')
    arg_parser.add_argument('-z', '--zero', action='store', type=int,
                            dest='mjd_zero', metavar='MJD', default=55800,
                            help='Set zero day to this MJD.')
    # parse_args(None) falls back to sys.argv, so the original
    # if/else over `set_args` collapses to one call.
    return arg_parser.parse_args(set_args)
class Star(object):
    """Simple object to hold data and metadata about a star.
    Parameters
    ----------
    row : :class:`~astropy.io.fits.fitsrec.FITS_record`
        Row from a FITS binary table.
    mjd_zero : :class:`int`
        The offset to the MJD, to make the range of days reasonable.
    Attributes
    ----------
    apstar_id : :class:`str`
        The "fully qualified" ID of the object.
    cas_base_url : :class:`str`
        Used to construct links to data on CAS.
    commiss : :class:`int`
        The commissioning flag.
    fit1 : :class:`~scipy.optimize.OptimizeResult`
        Placeholder for the best fit information.
    fit2 : :class:`~scipy.optimize.OptimizeResult`
        Placeholder for the second-best fit information.
    jd2mjd : :class:`float`
        The offset from Julian Day to Modified Julian Day.
    locid : :class:`int`
        The location ID.
    logg : :class:`float`
        The surface gravity.
    mh : :class:`float`
        The metallicity.
    min_visits : :class:`int`
        The number of data points should be greater than this for a viable fit.
    mjd_zero : :class:`int`
        Stores the corresponding input parameter.
    sas_base_url : :class:`str`
        Used to construct links to data on SAS.
    teff : :class:`float`
        The effective temperature.
    tmassid : :class:`str`
        The ID used for targeting the object.
    vhelio_avg : :class:`float`
        The average heliocentric velocity.
    vscatter : :class:`float`
        The error on `vhelio_avg`.
    """
    sas_base_url = 'http://mirror.sdss3.org/irSpectrumDetail'
    cas_base_url = ("http://skyserver.sdss.org/dr12/en/tools/" +
                    "explore/Summary.aspx?apid=")
    jd2mjd = 2400000.5
    min_visits = 5  # number of data points required for viable fit.
    def __init__(self, row, mjd_zero):
        self.mjd_zero = mjd_zero
        #
        # Try to initialize data as close to the column order as possible.
        #
        self.apstar_id = str(row['apstar_id'])
        # apstar_id fields: survey.telescope.c|s.stars.locid.tmassid
        parts = self.apstar_id.split('.')
        self.commiss = int(parts[2] == 'c')
        self.locid = int(parts[4])
        self.tmassid = parts[5]
        self.teff = float(row['teff'])
        self.logg = float(row['logg'])
        self.mh = float(row['param_m_h'])
        self.vhelio_avg = float(row['vhelio_avg'])
        self.vscatter = float(row['vscatter'])
        # self.verr = float(row['verr'])
        # self.verr_med = float(row['verr'])
        self.synthvhelio_avg = float(row['synthvhelio_avg'])
        self.synthvscatter = float(row['synthvscatter'])
        # self.synthverr = float(row['synthverr'])
        # self.synthverr_med = float(row['synthverr'])
        self.ORstarflag = int(row['ORstarflag'])
        self.ANDstarflag = int(row['ANDstarflag'])
        # per-visit quantities start as one-element arrays; append()
        # grows them, and the *_list arrays are filtered lazily by the
        # corresponding properties.
        self._visit_list = array((row['visit_id'],))
        # self.ra = float(row['ra'])
        # self.dec = float(row['dec'])
        # self.glon = float(row['glon'])
        # self.glat = float(row['glat'])
        self._snr_list = array((row['snr'],))
        self._mjd_list = array((row['jd'] - self.jd2mjd - self.mjd_zero,))
        self._visitstarflag_list = array((row['visitstarflag'],))
        self._vhelio_list = array((row['vhelio'],))
        self._vrelerr_list = array((row['vrelerr'],))
        self._synthvhelio_list = array((row['synthvhelio'],))
        self._synthvrelerr_list = array((row['synthvrelerr'],))
        #
        # Placeholders (lazy caches; computed on first property access
        # and NOT invalidated by append(), so append all rows first).
        #
        self._visits = None
        self._snr = None
        self._mjd = None
        self._visitstarflag = None
        self._vhelio = None
        self._vrelerr = None
        self._synthvhelio = None
        self._synthvrelerr = None
        self._clean = None
        self._valid_flags = None
        self._nvisits = None
        self._json_data = None
        self.fit1 = None
        self.fit2 = None
        return
    @property
    def sas(self):
        """URL for this object on SAS.
        """
        return ("{0.sas_base_url}?locid={0.locid:d}&commiss={0.commiss:d}&" +
                "apogeeid={0.tmassid}").format(self)
    @property
    def cas(self):
        """URL for this object on SkyServer.
        """
        return self.cas_base_url + self.apstar_id
    @property
    def visits(self):
        """Visit IDs corresponding to each observation, excluding bad data.
        """
        if self._visits is None:
            self._visits = self._visit_list[self.clean]
        return self._visits
    @property
    def snr(self):
        """Signal-to-noise ratio.
        """
        if self._snr is None:
            self._snr = self._snr_list[self.clean]
        return self._snr
    @property
    def mjd(self):
        """Date of observation.
        """
        if self._mjd is None:
            self._mjd = self._mjd_list[self.clean]
        return self._mjd
    @property
    def visitstarflag(self):
        """Flags set for each visit.
        """
        if self._visitstarflag is None:
            self._visitstarflag = self._visitstarflag_list[self.clean]
        return self._visitstarflag
    @property
    def vhelio(self):
        """Heliocentric radial velocity.
        """
        if self._vhelio is None:
            self._vhelio = self._vhelio_list[self.clean]
        return self._vhelio
    @property
    def vrelerr(self):
        """Radial velocity error.
        """
        if self._vrelerr is None:
            self._vrelerr = self._vrelerr_list[self.clean]
        return self._vrelerr
    @property
    def synthvhelio(self):
        """Heliocentric radial velocity, based on synthetic spectra.
        """
        if self._synthvhelio is None:
            self._synthvhelio = self._synthvhelio_list[self.clean]
        return self._synthvhelio
    @property
    def synthvrelerr(self):
        """Radial velocity error, based on synthetic spectra.
        """
        if self._synthvrelerr is None:
            self._synthvrelerr = self._synthvrelerr_list[self.clean]
        return self._synthvrelerr
    @property
    def clean(self):
        """A boolean array indicating where the velocity has reasonable values.
        """
        if self._clean is None:
            # velocities at or above 3e5 km/s are sentinel/bad values
            self._clean = self._vhelio_list < 3e5
        return self._clean
    @property
    def valid_flags(self):
        """Make sure that the flags are self-consistent.
        """
        if self._valid_flags is None:
            #
            # numpy.bitwise_and.identity = 1, so numpy.bitwise_and.reduce
            # doesn't work as expected.
            # Looks like this is fixed in bleeding-edge versions of numpy,
            # but for now...
            #
            # The overall flag is based on *all* the observations, not just
            # the clean ones.
            #
            sf = APOGEE_STARFLAG()
            n = self._visitstarflag_list.size
            o = self._visitstarflag_list[0]
            a = self._visitstarflag_list[0]
            for i in range(1, n):
                o |= self._visitstarflag_list[i]
                a &= self._visitstarflag_list[i]
            # the catalog OR flag may also carry the combination-level
            # SUSPECT_* bits that no individual visit has
            o2 = o | sf.flagval('SUSPECT_RV_COMBINATION')
            o3 = o | sf.flagval('SUSPECT_BROAD_LINES')
            self._valid_flags = (((self.ORstarflag == o) or
                                  (self.ORstarflag == o2) or
                                  (self.ORstarflag == o3) or
                                  (self.ORstarflag == (o2 | o3))) and
                                 (self.ANDstarflag == a))
        return self._valid_flags
    @property
    def nvisits(self):
        """Number of data points, excluding bad data.
        """
        if self._nvisits is None:
            self._nvisits = len(self.mjd)
        return self._nvisits
    @property
    def fittable(self):
        """``True`` if there are enough data points for a viable fit.
        """
        return self.nvisits > self.min_visits
    @property
    def json(self):
        """Encode the Star data as a dictionary that can be converted to
        JSON format.
        """
        if self._json_data is None:
            sf = APOGEE_STARFLAG()
            self._json_data = dict()
            self._json_data['mjd_zero'] = self.mjd_zero
            self._json_data['apstar_id'] = self.apstar_id
            self._json_data['commiss'] = self.commiss
            self._json_data['locid'] = self.locid
            self._json_data['tmassid'] = self.tmassid
            self._json_data['teff'] = self.teff
            self._json_data['logg'] = self.logg
            self._json_data['mh'] = self.mh
            self._json_data['vhelio_avg'] = self.vhelio_avg
            self._json_data['vscatter'] = self.vscatter
            self._json_data['synthvhelio_avg'] = self.synthvhelio_avg
            self._json_data['synthvscatter'] = self.synthvscatter
            self._json_data['visits'] = self.visits.tolist()
            self._json_data['snr'] = self.snr.tolist()
            self._json_data['mjd'] = self.mjd.tolist()
            # materialize as a list: under Python 3 map() returns a lazy
            # iterator, which is not JSON-serializable and would be
            # exhausted after one use
            self._json_data['visitstarflag'] = [sf.flagname(f)
                                                for f in self.visitstarflag]
            self._json_data['vhelio'] = self.vhelio.tolist()
            self._json_data['vrelerr'] = self.vrelerr.tolist()
            self._json_data['synthvhelio'] = self.synthvhelio.tolist()
            self._json_data['synthvrelerr'] = self.synthvrelerr.tolist()
            if self.fit1 is None:
                self._json_data['fit1_param'] = None
            else:
                self._json_data['fit1_param'] = self.fit1.x.tolist()
            if self.fit2 is None:
                self._json_data['fit2_param'] = None
            else:
                self._json_data['fit2_param'] = self.fit2.x.tolist()
        return self._json_data
    def append(self, row):
        """Add data to object already initialized.
        Parameters
        ----------
        row : :class:`~astropy.io.fits.fitsrec.FITS_record`
            Row from a FITS binary table.
        Returns
        -------
        :class:`Star`
            Returns the instance, in case you need to chain.
        """
        self._visit_list = append(self._visit_list, row['visit_id'])
        self._snr_list = append(self._snr_list, row['snr'])
        self._mjd_list = append(self._mjd_list,
                                row['jd'] - self.jd2mjd - self.mjd_zero)
        self._visitstarflag_list = append(self._visitstarflag_list,
                                          row['visitstarflag'])
        self._vhelio_list = append(self._vhelio_list, row['vhelio'])
        self._vrelerr_list = append(self._vrelerr_list, row['vrelerr'])
        self._synthvhelio_list = append(self._synthvhelio_list,
                                        row['synthvhelio'])
        self._synthvrelerr_list = append(self._synthvrelerr_list,
                                         row['synthvrelerr'])
        return self
def rv_data(options, pickleFile='apogee_vrel.pickle',
            dataFile='apogee_vrel_v2_weaver.fit'):
    """Load RV data.
    Parameters
    ----------
    options : :class:`~argparse.Namespace`
        Command-line options.  Uses ``plotDir``, ``clobber`` and
        ``mjd_zero``.
    pickleFile : :class:`str`, optional
        Name of the cache file.  Defaults to 'apogee_vrel.pickle'.
    dataFile : :class:`str`, optional
        Name of FITS file.  Defaults to 'apogee_vrel_v2_weaver.fit'.
    Returns
    -------
    :class:`~collections.OrderedDict`
        The data loaded from disk.
    """
    from os.path import exists, join
    from collections import OrderedDict
    try:
        # Python 2: use the C-accelerated pickle
        import cPickle as pickle
    except ImportError:
        # Python 3: cPickle was merged into pickle
        import pickle
    #
    # Prefer the pickled cache unless the caller asked to clobber it.
    #
    f = join(options.plotDir, pickleFile)
    if exists(f) and not options.clobber:
        with open(f, 'rb') as p:
            stars = pickle.load(p)
    else:
        # astropy is only needed on a cache miss, so import it lazily
        import astropy.io.fits as pyfits
        fit = join(options.plotDir, dataFile)
        hdulist = pyfits.open(fit)
        data = hdulist[1].data
        hdulist.close()
        #
        # Sort the data: group the per-visit rows by star
        #
        stars = OrderedDict()
        for row in data:
            if row['apstar_id'] in stars:
                stars[row['apstar_id']].append(row)
            else:
                stars[row['apstar_id']] = Star(row, options.mjd_zero)
        #
        # Save the data
        #
        with open(f, 'wb') as p:
            pickle.dump(stars, p)
    return stars
def create_index(stars, ncol=6):
    """Create index.html file.
    Parameters
    ----------
    stars : :class:`dict`
        Dictionary containing data grouped by star.
    ncol : :class:`int`, optional
        Number of columns in the output.
    Returns
    -------
    :class:`str`
        The index.html file as a string.
    """
    from collections import OrderedDict
    from jinja2 import Environment, PackageLoader
    starflag = APOGEE_STARFLAG()
    #
    # Group one display tuple per star under its location ID.
    #
    tables = OrderedDict()
    for key in stars:
        star = stars[key]
        entry = (star.tmassid,
                 star.teff,
                 star.logg,
                 star.mh,
                 star.sas,
                 star.cas,
                 ", ".join(starflag.flagname(star.ORstarflag)),)
        tables.setdefault(star.locid, []).append(entry)
    #
    # Pad tables out to multiples of ncol
    #
    for locid in tables:
        rows = tables[locid]
        while len(rows) % ncol:
            rows.append(tuple())
    env = Environment(loader=PackageLoader('rv', 'templates'))
    template = env.get_template('plots.html')
    return template.render(title='APOGEE Radial Velocities', ncol=ncol,
                           tables=tables)
| |
#-*- encoding: UTF-8 -*-
# file generated by convert_i18n_yaml.py from i18n.yaml
languages = \
{'ar': {'and': [u'*', u'\u0648'],
'background': [u'\u0627\u0644\u062e\u0644\u0641\u064a\u0629'],
'but': [u'*', u'\u0644\u0643\u0646'],
'examples': [u'\u0627\u0645\u062b\u0644\u0629'],
'feature': [u'\u062e\u0627\u0635\u064a\u0629'],
'given': [u'*', u'\u0628\u0641\u0631\u0636'],
'name': [u'Arabic'],
'native': [u'\u0627\u0644\u0639\u0631\u0628\u064a\u0629'],
'scenario': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648'],
'scenario_outline': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648 \u0645\u062e\u0637\u0637'],
'then': [u'*', u'\u0627\u0630\u0627\u064b', u'\u062b\u0645'],
'when': [u'*',
u'\u0645\u062a\u0649',
u'\u0639\u0646\u062f\u0645\u0627']},
'bg': {'and': [u'*', u'\u0418'],
'background': [u'\u041f\u0440\u0435\u0434\u0438\u0441\u0442\u043e\u0440\u0438\u044f'],
'but': [u'*', u'\u041d\u043e'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442'],
'given': [u'*', u'\u0414\u0430\u0434\u0435\u043d\u043e'],
'name': [u'Bulgarian'],
'native': [u'\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0420\u0430\u043c\u043a\u0430 \u043d\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'then': [u'*', u'\u0422\u043e'],
'when': [u'*', u'\u041a\u043e\u0433\u0430\u0442\u043e']},
'ca': {'and': [u'*', u'I'],
'background': [u'Rerefons', u'Antecedents'],
'but': [u'*', u'Per\xf2'],
'examples': [u'Exemples'],
'feature': [u'Caracter\xedstica', u'Funcionalitat'],
'given': [u'*', u'Donat', u'Donada', u'At\xe8s', u'Atesa'],
'name': [u'Catalan'],
'native': [u'catal\xe0'],
'scenario': [u'Escenari'],
'scenario_outline': [u"Esquema de l'escenari"],
'then': [u'*', u'Aleshores', u'Cal'],
'when': [u'*', u'Quan']},
'cs': {'and': [u'*', u'A', u'A tak\xe9'],
'background': [u'Pozad\xed', u'Kontext'],
'but': [u'*', u'Ale'],
'examples': [u'P\u0159\xedklady'],
'feature': [u'Po\u017eadavek'],
'given': [u'*', u'Pokud'],
'name': [u'Czech'],
'native': [u'\u010cesky'],
'scenario': [u'Sc\xe9n\xe1\u0159'],
'scenario_outline': [u'N\xe1\u010drt Sc\xe9n\xe1\u0159e',
u'Osnova sc\xe9n\xe1\u0159e'],
'then': [u'*', u'Pak'],
'when': [u'*', u'Kdy\u017e']},
'cy-GB': {'and': [u'*', u'A'],
'background': [u'Cefndir'],
'but': [u'*', u'Ond'],
'examples': [u'Enghreifftiau'],
'feature': [u'Arwedd'],
'given': [u'*', u'Anrhegedig a'],
'name': [u'Welsh'],
'native': [u'Cymraeg'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenario Amlinellol'],
'then': [u'*', u'Yna'],
'when': [u'*', u'Pryd']},
'da': {'and': [u'*', u'Og'],
'background': [u'Baggrund'],
'but': [u'*', u'Men'],
'examples': [u'Eksempler'],
'feature': [u'Egenskab'],
'given': [u'*', u'Givet'],
'name': [u'Danish'],
'native': [u'dansk'],
'scenario': [u'Scenarie'],
'scenario_outline': [u'Abstrakt Scenario'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe5r']},
'de': {'and': [u'*', u'Und'],
'background': [u'Grundlage'],
'but': [u'*', u'Aber'],
'examples': [u'Beispiele'],
'feature': [u'Funktionalit\xe4t'],
'given': [u'*', u'Angenommen', u'Gegeben sei'],
'name': [u'German'],
'native': [u'Deutsch'],
'scenario': [u'Szenario'],
'scenario_outline': [u'Szenariogrundriss'],
'then': [u'*', u'Dann'],
'when': [u'*', u'Wenn']},
'en': {'and': [u'*', u'And'],
'background': [u'Background'],
'but': [u'*', u'But'],
'examples': [u'Examples', u'Scenarios'],
'feature': [u'Feature'],
'given': [u'*', u'Given'],
'name': [u'English'],
'native': [u'English'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenario Outline', u'Scenario Template'],
'then': [u'*', u'Then'],
'when': [u'*', u'When']},
'en-Scouse': {'and': [u'*', u'An'],
'background': [u'Dis is what went down'],
'but': [u'*', u'Buh'],
'examples': [u'Examples'],
'feature': [u'Feature'],
'given': [u'*', u'Givun', u'Youse know when youse got'],
'name': [u'Scouse'],
'native': [u'Scouse'],
'scenario': [u'The thing of it is'],
'scenario_outline': [u'Wharrimean is'],
'then': [u'*', u'Dun', u'Den youse gotta'],
'when': [u'*', u'Wun', u'Youse know like when']},
'en-au': {'and': [u'*', u'N'],
'background': [u'Background'],
'but': [u'*', u'Cept'],
'examples': [u'Cobber'],
'feature': [u'Crikey'],
'given': [u'*', u'Ya know how'],
'name': [u'Australian'],
'native': [u'Australian'],
'scenario': [u'Mate'],
'scenario_outline': [u'Blokes'],
'then': [u'*', u'Ya gotta'],
'when': [u'*', u'When']},
'en-lol': {'and': [u'*', u'AN'],
'background': [u'B4'],
'but': [u'*', u'BUT'],
'examples': [u'EXAMPLZ'],
'feature': [u'OH HAI'],
'given': [u'*', u'I CAN HAZ'],
'name': [u'LOLCAT'],
'native': [u'LOLCAT'],
'scenario': [u'MISHUN'],
'scenario_outline': [u'MISHUN SRSLY'],
'then': [u'*', u'DEN'],
'when': [u'*', u'WEN']},
'en-pirate': {'and': [u'*', u'Aye'],
'background': [u'Yo-ho-ho'],
'but': [u'*', u'Avast!'],
'examples': [u'Dead men tell no tales'],
'feature': [u'Ahoy matey!'],
'given': [u'*', u'Gangway!'],
'name': [u'Pirate'],
'native': [u'Pirate'],
'scenario': [u'Heave to'],
'scenario_outline': [u'Shiver me timbers'],
'then': [u'*', u'Let go and haul'],
'when': [u'*', u'Blimey!']},
'en-tx': {'and': [u'*', u"And y'all"],
'background': [u'Background'],
'but': [u'*', u"But y'all"],
'examples': [u'Examples'],
'feature': [u'Feature'],
'given': [u'*', u"Given y'all"],
'name': [u'Texan'],
'native': [u'Texan'],
'scenario': [u'Scenario'],
'scenario_outline': [u"All y'all"],
'then': [u'*', u"Then y'all"],
'when': [u'*', u"When y'all"]},
'eo': {'and': [u'*', u'Kaj'],
'background': [u'Fono'],
'but': [u'*', u'Sed'],
'examples': [u'Ekzemploj'],
'feature': [u'Trajto'],
'given': [u'*', u'Donita\u0135o'],
'name': [u'Esperanto'],
'native': [u'Esperanto'],
'scenario': [u'Scenaro'],
'scenario_outline': [u'Konturo de la scenaro'],
'then': [u'*', u'Do'],
'when': [u'*', u'Se']},
'es': {'and': [u'*', u'Y'],
'background': [u'Antecedentes'],
'but': [u'*', u'Pero'],
'examples': [u'Ejemplos'],
'feature': [u'Caracter\xedstica'],
'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'],
'name': [u'Spanish'],
'native': [u'espa\xf1ol'],
'scenario': [u'Escenario'],
'scenario_outline': [u'Esquema del escenario'],
'then': [u'*', u'Entonces'],
'when': [u'*', u'Cuando']},
'et': {'and': [u'*', u'Ja'],
'background': [u'Taust'],
'but': [u'*', u'Kuid'],
'examples': [u'Juhtumid'],
'feature': [u'Omadus'],
'given': [u'*', u'Eeldades'],
'name': [u'Estonian'],
'native': [u'eesti keel'],
'scenario': [u'Stsenaarium'],
'scenario_outline': [u'Raamstsenaarium'],
'then': [u'*', u'Siis'],
'when': [u'*', u'Kui']},
'fi': {'and': [u'*', u'Ja'],
'background': [u'Tausta'],
'but': [u'*', u'Mutta'],
'examples': [u'Tapaukset'],
'feature': [u'Ominaisuus'],
'given': [u'*', u'Oletetaan'],
'name': [u'Finnish'],
'native': [u'suomi'],
'scenario': [u'Tapaus'],
'scenario_outline': [u'Tapausaihio'],
'then': [u'*', u'Niin'],
'when': [u'*', u'Kun']},
'fr': {'and': [u'*', u'Et'],
'background': [u'Contexte'],
'but': [u'*', u'Mais'],
'examples': [u'Exemples'],
'feature': [u'Fonctionnalit\xe9'],
'given': [u'*',
u'Soit',
u'Etant donn\xe9',
u'Etant donn\xe9e',
u'Etant donn\xe9s',
u'Etant donn\xe9es',
u'\xc9tant donn\xe9',
u'\xc9tant donn\xe9e',
u'\xc9tant donn\xe9s',
u'\xc9tant donn\xe9es'],
'name': [u'French'],
'native': [u'fran\xe7ais'],
'scenario': [u'Sc\xe9nario'],
'scenario_outline': [u'Plan du sc\xe9nario', u'Plan du Sc\xe9nario'],
'then': [u'*', u'Alors'],
'when': [u'*', u'Quand', u'Lorsque', u"Lorsqu'<"]},
'he': {'and': [u'*', u'\u05d5\u05d2\u05dd'],
'background': [u'\u05e8\u05e7\u05e2'],
'but': [u'*', u'\u05d0\u05d1\u05dc'],
'examples': [u'\u05d3\u05d5\u05d2\u05de\u05d0\u05d5\u05ea'],
'feature': [u'\u05ea\u05db\u05d5\u05e0\u05d4'],
'given': [u'*', u'\u05d1\u05d4\u05d9\u05e0\u05ea\u05df'],
'name': [u'Hebrew'],
'native': [u'\u05e2\u05d1\u05e8\u05d9\u05ea'],
'scenario': [u'\u05ea\u05e8\u05d7\u05d9\u05e9'],
'scenario_outline': [u'\u05ea\u05d1\u05e0\u05d9\u05ea \u05ea\u05e8\u05d7\u05d9\u05e9'],
'then': [u'*', u'\u05d0\u05d6', u'\u05d0\u05d6\u05d9'],
'when': [u'*', u'\u05db\u05d0\u05e9\u05e8']},
'hr': {'and': [u'*', u'I'],
'background': [u'Pozadina'],
'but': [u'*', u'Ali'],
'examples': [u'Primjeri', u'Scenariji'],
'feature': [u'Osobina', u'Mogu\u0107nost', u'Mogucnost'],
'given': [u'*', u'Zadan', u'Zadani', u'Zadano'],
'name': [u'Croatian'],
'native': [u'hrvatski'],
'scenario': [u'Scenarij'],
'scenario_outline': [u'Skica', u'Koncept'],
'then': [u'*', u'Onda'],
'when': [u'*', u'Kada', u'Kad']},
'hu': {'and': [u'*', u'\xc9s'],
'background': [u'H\xe1tt\xe9r'],
'but': [u'*', u'De'],
'examples': [u'P\xe9ld\xe1k'],
'feature': [u'Jellemz\u0151'],
'given': [u'*', u'Amennyiben', u'Adott'],
'name': [u'Hungarian'],
'native': [u'magyar'],
'scenario': [u'Forgat\xf3k\xf6nyv'],
'scenario_outline': [u'Forgat\xf3k\xf6nyv v\xe1zlat'],
'then': [u'*', u'Akkor'],
'when': [u'*', u'Majd', u'Ha', u'Amikor']},
'id': {'and': [u'*', u'Dan'],
'background': [u'Dasar'],
'but': [u'*', u'Tapi'],
'examples': [u'Contoh'],
'feature': [u'Fitur'],
'given': [u'*', u'Dengan'],
'name': [u'Indonesian'],
'native': [u'Bahasa Indonesia'],
'scenario': [u'Skenario'],
'scenario_outline': [u'Skenario konsep'],
'then': [u'*', u'Maka'],
'when': [u'*', u'Ketika']},
'is': {'and': [u'*', u'Og'],
'background': [u'Bakgrunnur'],
'but': [u'*', u'En'],
'examples': [u'D\xe6mi', u'Atbur\xf0ar\xe1sir'],
'feature': [u'Eiginleiki'],
'given': [u'*', u'Ef'],
'name': [u'Icelandic'],
'native': [u'\xcdslenska'],
'scenario': [u'Atbur\xf0ar\xe1s'],
'scenario_outline': [u'L\xfdsing Atbur\xf0ar\xe1sar',
u'L\xfdsing D\xe6ma'],
'then': [u'*', u'\xde\xe1'],
'when': [u'*', u'\xdeegar']},
'it': {'and': [u'*', u'E'],
'background': [u'Contesto'],
'but': [u'*', u'Ma'],
'examples': [u'Esempi'],
'feature': [u'Funzionalit\xe0'],
'given': [u'*', u'Dato', u'Data', u'Dati', u'Date'],
'name': [u'Italian'],
'native': [u'italiano'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Schema dello scenario'],
'then': [u'*', u'Allora'],
'when': [u'*', u'Quando']},
'ja': {'and': [u'*', u'\u304b\u3064<'],
'background': [u'\u80cc\u666f'],
'but': [u'*',
u'\u3057\u304b\u3057<',
u'\u4f46\u3057<',
u'\u305f\u3060\u3057<'],
'examples': [u'\u4f8b', u'\u30b5\u30f3\u30d7\u30eb'],
'feature': [u'\u30d5\u30a3\u30fc\u30c1\u30e3', u'\u6a5f\u80fd'],
'given': [u'*', u'\u524d\u63d0<'],
'name': [u'Japanese'],
'native': [u'\u65e5\u672c\u8a9e'],
'scenario': [u'\u30b7\u30ca\u30ea\u30aa'],
'scenario_outline': [u'\u30b7\u30ca\u30ea\u30aa\u30a2\u30a6\u30c8\u30e9\u30a4\u30f3',
u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8',
u'\u30c6\u30f3\u30d7\u30ec',
u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec'],
'then': [u'*', u'\u306a\u3089\u3070<'],
'when': [u'*', u'\u3082\u3057<']},
'ko': {'and': [u'*', u'\uadf8\ub9ac\uace0<'],
'background': [u'\ubc30\uacbd'],
'but': [u'*', u'\ud558\uc9c0\ub9cc<', u'\ub2e8<'],
'examples': [u'\uc608'],
'feature': [u'\uae30\ub2a5'],
'given': [u'*', u'\uc870\uac74<', u'\uba3c\uc800<'],
'name': [u'Korean'],
'native': [u'\ud55c\uad6d\uc5b4'],
'scenario': [u'\uc2dc\ub098\ub9ac\uc624'],
'scenario_outline': [u'\uc2dc\ub098\ub9ac\uc624 \uac1c\uc694'],
'then': [u'*', u'\uadf8\ub7ec\uba74<'],
'when': [u'*', u'\ub9cc\uc77c<', u'\ub9cc\uc57d<']},
'lt': {'and': [u'*', u'Ir'],
'background': [u'Kontekstas'],
'but': [u'*', u'Bet'],
'examples': [u'Pavyzd\u017eiai', u'Scenarijai', u'Variantai'],
'feature': [u'Savyb\u0117'],
'given': [u'*', u'Duota'],
'name': [u'Lithuanian'],
'native': [u'lietuvi\u0173 kalba'],
'scenario': [u'Scenarijus'],
'scenario_outline': [u'Scenarijaus \u0161ablonas'],
'then': [u'*', u'Tada'],
'when': [u'*', u'Kai']},
'lu': {'and': [u'*', u'an', u'a'],
'background': [u'Hannergrond'],
'but': [u'*', u'awer', u'm\xe4'],
'examples': [u'Beispiller'],
'feature': [u'Funktionalit\xe9it'],
'given': [u'*', u'ugeholl'],
'name': [u'Luxemburgish'],
'native': [u'L\xebtzebuergesch'],
'scenario': [u'Szenario'],
'scenario_outline': [u'Plang vum Szenario'],
'then': [u'*', u'dann'],
'when': [u'*', u'wann']},
'lv': {'and': [u'*', u'Un'],
'background': [u'Konteksts', u'Situ\u0101cija'],
'but': [u'*', u'Bet'],
'examples': [u'Piem\u0113ri', u'Paraugs'],
'feature': [u'Funkcionalit\u0101te', u'F\u012b\u010da'],
'given': [u'*', u'Kad'],
'name': [u'Latvian'],
'native': [u'latvie\u0161u'],
'scenario': [u'Scen\u0101rijs'],
'scenario_outline': [u'Scen\u0101rijs p\u0113c parauga'],
'then': [u'*', u'Tad'],
'when': [u'*', u'Ja']},
'nl': {'and': [u'*', u'En'],
'background': [u'Achtergrond'],
'but': [u'*', u'Maar'],
'examples': [u'Voorbeelden'],
'feature': [u'Functionaliteit'],
'given': [u'*', u'Gegeven', u'Stel'],
'name': [u'Dutch'],
'native': [u'Nederlands'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Abstract Scenario'],
'then': [u'*', u'Dan'],
'when': [u'*', u'Als']},
'no': {'and': [u'*', u'Og'],
'background': [u'Bakgrunn'],
'but': [u'*', u'Men'],
'examples': [u'Eksempler'],
'feature': [u'Egenskap'],
'given': [u'*', u'Gitt'],
'name': [u'Norwegian'],
'native': [u'norsk'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Scenariomal', u'Abstrakt Scenario'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe5r']},
'pl': {'and': [u'*', u'Oraz', u'I'],
'background': [u'Za\u0142o\u017cenia'],
'but': [u'*', u'Ale'],
'examples': [u'Przyk\u0142ady'],
'feature': [u'W\u0142a\u015bciwo\u015b\u0107'],
'given': [u'*', u'Zak\u0142adaj\u0105c', u'Maj\u0105c'],
'name': [u'Polish'],
'native': [u'polski'],
'scenario': [u'Scenariusz'],
'scenario_outline': [u'Szablon scenariusza'],
'then': [u'*', u'Wtedy'],
'when': [u'*', u'Je\u017celi', u'Je\u015bli']},
'pt': {'and': [u'*', u'E'],
'background': [u'Contexto'],
'but': [u'*', u'Mas'],
'examples': [u'Exemplos'],
'feature': [u'Funcionalidade'],
'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'],
'name': [u'Portuguese'],
'native': [u'portugu\xeas'],
'scenario': [u'Cen\xe1rio', u'Cenario'],
'scenario_outline': [u'Esquema do Cen\xe1rio', u'Esquema do Cenario'],
'then': [u'*', u'Ent\xe3o', u'Entao'],
'when': [u'*', u'Quando']},
'ro': {'and': [u'*', u'Si', u'\u0218i', u'\u015ei'],
'background': [u'Context'],
'but': [u'*', u'Dar'],
'examples': [u'Exemple'],
'feature': [u'Functionalitate',
u'Func\u021bionalitate',
u'Func\u0163ionalitate'],
'given': [u'*',
u'Date fiind',
u'Dat fiind',
u'Dati fiind',
u'Da\u021bi fiind',
u'Da\u0163i fiind'],
'name': [u'Romanian'],
'native': [u'rom\xe2n\u0103'],
'scenario': [u'Scenariu'],
'scenario_outline': [u'Structura scenariu',
u'Structur\u0103 scenariu'],
'then': [u'*', u'Atunci'],
'when': [u'*', u'Cand', u'C\xe2nd']},
'ru': {'and': [u'*',
u'\u0418',
u'\u041a \u0442\u043e\u043c\u0443 \u0436\u0435'],
'background': [u'\u041f\u0440\u0435\u0434\u044b\u0441\u0442\u043e\u0440\u0438\u044f',
u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442'],
'but': [u'*', u'\u041d\u043e', u'\u0410'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u044b'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u044f',
u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b',
u'\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u043e'],
'given': [u'*',
u'\u0414\u043e\u043f\u0443\u0441\u0442\u0438\u043c',
u'\u0414\u0430\u043d\u043e',
u'\u041f\u0443\u0441\u0442\u044c'],
'name': [u'Russian'],
'native': [u'\u0440\u0443\u0441\u0441\u043a\u0438\u0439'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u044f'],
'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0433\u0434\u0430'],
'when': [u'*',
u'\u0415\u0441\u043b\u0438',
u'\u041a\u043e\u0433\u0434\u0430']},
'sk': {'and': [u'*', u'A'],
'background': [u'Pozadie'],
'but': [u'*', u'Ale'],
'examples': [u'Pr\xedklady'],
'feature': [u'Po\u017eiadavka'],
'given': [u'*', u'Pokia\u013e'],
'name': [u'Slovak'],
'native': [u'Slovensky'],
'scenario': [u'Scen\xe1r'],
'scenario_outline': [u'N\xe1\u010drt Scen\xe1ru'],
'then': [u'*', u'Tak'],
'when': [u'*', u'Ke\u010f']},
'sr-Cyrl': {'and': [u'*', u'\u0418'],
'background': [u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442',
u'\u041e\u0441\u043d\u043e\u0432\u0430',
u'\u041f\u043e\u0437\u0430\u0434\u0438\u043d\u0430'],
'but': [u'*', u'\u0410\u043b\u0438'],
'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438',
u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442',
u'\u041c\u043e\u0433\u0443\u045b\u043d\u043e\u0441\u0442',
u'\u041e\u0441\u043e\u0431\u0438\u043d\u0430'],
'given': [u'*',
u'\u0417\u0430\u0434\u0430\u0442\u043e',
u'\u0417\u0430\u0434\u0430\u0442\u0435',
u'\u0417\u0430\u0434\u0430\u0442\u0438'],
'name': [u'Serbian'],
'native': [u'\u0421\u0440\u043f\u0441\u043a\u0438'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u043e',
u'\u041f\u0440\u0438\u043c\u0435\u0440'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0430',
u'\u0421\u043a\u0438\u0446\u0430',
u'\u041a\u043e\u043d\u0446\u0435\u043f\u0442'],
'then': [u'*', u'\u041e\u043d\u0434\u0430'],
'when': [u'*',
u'\u041a\u0430\u0434\u0430',
u'\u041a\u0430\u0434']},
'sr-Latn': {'and': [u'*', u'I'],
'background': [u'Kontekst', u'Osnova', u'Pozadina'],
'but': [u'*', u'Ali'],
'examples': [u'Primeri', u'Scenariji'],
'feature': [u'Funkcionalnost',
u'Mogu\u0107nost',
u'Mogucnost',
u'Osobina'],
'given': [u'*', u'Zadato', u'Zadate', u'Zatati'],
'name': [u'Serbian (Latin)'],
'native': [u'Srpski (Latinica)'],
'scenario': [u'Scenario', u'Primer'],
'scenario_outline': [u'Struktura scenarija',
u'Skica',
u'Koncept'],
'then': [u'*', u'Onda'],
'when': [u'*', u'Kada', u'Kad']},
'sv': {'and': [u'*', u'Och'],
'background': [u'Bakgrund'],
'but': [u'*', u'Men'],
'examples': [u'Exempel'],
'feature': [u'Egenskap'],
'given': [u'*', u'Givet'],
'name': [u'Swedish'],
'native': [u'Svenska'],
'scenario': [u'Scenario'],
'scenario_outline': [u'Abstrakt Scenario', u'Scenariomall'],
'then': [u'*', u'S\xe5'],
'when': [u'*', u'N\xe4r']},
'tr': {'and': [u'*', u'Ve'],
'background': [u'Ge\xe7mi\u015f'],
'but': [u'*', u'Fakat', u'Ama'],
'examples': [u'\xd6rnekler'],
'feature': [u'\xd6zellik'],
'given': [u'*', u'Diyelim ki'],
'name': [u'Turkish'],
'native': [u'T\xfcrk\xe7e'],
'scenario': [u'Senaryo'],
'scenario_outline': [u'Senaryo tasla\u011f\u0131'],
'then': [u'*', u'O zaman'],
'when': [u'*', u'E\u011fer ki']},
'uk': {'and': [u'*',
u'\u0406',
u'\u0410 \u0442\u0430\u043a\u043e\u0436',
u'\u0422\u0430'],
'background': [u'\u041f\u0435\u0440\u0435\u0434\u0443\u043c\u043e\u0432\u0430'],
'but': [u'*', u'\u0410\u043b\u0435'],
'examples': [u'\u041f\u0440\u0438\u043a\u043b\u0430\u0434\u0438'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0456\u043e\u043d\u0430\u043b'],
'given': [u'*',
u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e',
u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e, \u0449\u043e',
u'\u041d\u0435\u0445\u0430\u0439',
u'\u0414\u0430\u043d\u043e'],
'name': [u'Ukrainian'],
'native': [u'\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0456\u0439'],
'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0456\u044e'],
'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0434\u0456'],
'when': [u'*',
u'\u042f\u043a\u0449\u043e',
u'\u041a\u043e\u043b\u0438']},
'uz': {'and': [u'*', u'\u0412\u0430'],
'background': [u'\u0422\u0430\u0440\u0438\u0445'],
'but': [u'*',
u'\u041b\u0435\u043a\u0438\u043d',
u'\u0411\u0438\u0440\u043e\u043a',
u'\u0410\u043c\u043c\u043e'],
'examples': [u'\u041c\u0438\u0441\u043e\u043b\u043b\u0430\u0440'],
'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b'],
'given': [u'*', u'\u0410\u0433\u0430\u0440'],
'name': [u'Uzbek'],
'native': [u'\u0423\u0437\u0431\u0435\u043a\u0447\u0430'],
'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'],
'scenario_outline': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439 \u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430\u0441\u0438'],
'then': [u'*', u'\u0423\u043d\u0434\u0430'],
'when': [u'*', u'\u0410\u0433\u0430\u0440']},
'vi': {'and': [u'*', u'V\xe0'],
'background': [u'B\u1ed1i c\u1ea3nh'],
'but': [u'*', u'Nh\u01b0ng'],
'examples': [u'D\u1eef li\u1ec7u'],
'feature': [u'T\xednh n\u0103ng'],
'given': [u'*', u'Bi\u1ebft', u'Cho'],
'name': [u'Vietnamese'],
'native': [u'Ti\u1ebfng Vi\u1ec7t'],
'scenario': [u'T\xecnh hu\u1ed1ng', u'K\u1ecbch b\u1ea3n'],
'scenario_outline': [u'Khung t\xecnh hu\u1ed1ng',
u'Khung k\u1ecbch b\u1ea3n'],
'then': [u'*', u'Th\xec'],
'when': [u'*', u'Khi']},
'zh-CN': {'and': [u'*', u'\u800c\u4e14<'],
'background': [u'\u80cc\u666f'],
'but': [u'*', u'\u4f46\u662f<'],
'examples': [u'\u4f8b\u5b50'],
'feature': [u'\u529f\u80fd'],
'given': [u'*', u'\u5047\u5982<'],
'name': [u'Chinese simplified'],
'native': [u'\u7b80\u4f53\u4e2d\u6587'],
'scenario': [u'\u573a\u666f'],
'scenario_outline': [u'\u573a\u666f\u5927\u7eb2'],
'then': [u'*', u'\u90a3\u4e48<'],
'when': [u'*', u'\u5f53<']},
'zh-TW': {'and': [u'*', u'\u800c\u4e14<', u'\u4e26\u4e14<'],
'background': [u'\u80cc\u666f'],
'but': [u'*', u'\u4f46\u662f<'],
'examples': [u'\u4f8b\u5b50'],
'feature': [u'\u529f\u80fd'],
'given': [u'*', u'\u5047\u8a2d<'],
'name': [u'Chinese traditional'],
'native': [u'\u7e41\u9ad4\u4e2d\u6587'],
'scenario': [u'\u5834\u666f', u'\u5287\u672c'],
'scenario_outline': [u'\u5834\u666f\u5927\u7db1',
u'\u5287\u672c\u5927\u7db1'],
'then': [u'*', u'\u90a3\u9ebc<'],
'when': [u'*', u'\u7576<']}}
| |
import logging
#from tripchaingame.web.reittiopasAPI import ReittiopasAPI
from reittiopasAPI import ReittiopasAPI
from point import LocationPoint
import json
import collections
from datetime import datetime
from ..models import Point, SecondaryPoint, AnalysisInfo, Trip
'''
A class dedicated to work on place recognition logic.
'''
# Module-level logger named after this module so its records can be
# filtered/configured per-module by the application's logging setup.
logger = logging.getLogger(__name__)
class PlaceRecognition:
    """Place recognition logic.

    Profiles the start/end points of recorded trips, accumulates per-address
    visit counts as SecondaryPoint rows, and promotes addresses that account
    for a large enough share of all visits to primary Point rows.

    Depends on the Django models Point, SecondaryPoint, AnalysisInfo and Trip
    and on the Reittiopas reverse-geocoding service.
    """

    def __init__(self):
        # Counters kept for API compatibility; the analysis below only
        # actively uses __points and __threshold.
        self.__count = 0
        self.__count_of_points = {}
        self.__count_of_end_points = {}
        # Minimum share of all visits an address needs before it is promoted
        # to a primary point of interest.
        self.__threshold = 0.15
        # Accumulated LocationPoint objects for the current analysis run.
        self.__points = []

    def most_frequent_secondary_point(self, coordinates):
        """Return the most frequently observed (lon, lat) pair.

        @param coordinates: list of "lon,lat" strings
        @return: (lon, lat) as strings for the most common non-empty pair,
                 or (0, 0) when the input list is empty.
        """
        lon = 0
        lat = 0
        if len(coordinates) > 0:
            counted = collections.Counter(coordinates)
            logger.debug(counted.most_common())
            # Walk pairs from most to least frequent and keep the first one
            # whose lon and lat parts are both non-empty.
            # (local renamed from `list`, which shadowed the builtin)
            ranked = dict(counted.most_common())
            for key in ranked:
                lonlat = key.split(",")
                lon = lonlat[0]
                lat = lonlat[1]
                if len(lon) > 0 and len(lat) > 0:
                    break
        return lon, lat

    def save_secondary_point(self, point, uid):
        '''
        Save or update a SecondaryPoint for this address (known or unknown).
        @param point: LocationPoint carrying the new observations
        @param uid: user id
        '''
        existing = SecondaryPoint.objects.filter(user_id=uid,
                                                 address=point.get_address())
        if existing.exists():
            stored = SecondaryPoint.objects.get(user_id=uid,
                                                address=point.get_address())
            # Merge new observations into the stored history; the raw
            # coordinate list is kept for later frequency analysis.
            # (a dead `p.address = ...` assignment on the QuerySet was removed)
            coordinates = stored.coords
            coordinates.extend(point.get_coords())
            visits = stored.visit_frequency + point.get_points()
            return existing.update(visit_frequency=visits, coords=coordinates)
        else:
            fresh = SecondaryPoint(address=point.get_address(),
                                   coords=point.get_coords(),
                                   visit_frequency=point.get_points(),
                                   user_id=uid)
            return fresh.save()

    def save_point(self, point, uid):
        '''
        Save or update a primary Point. Never changes an existing point's type.
        @param point: SecondaryPoint model instance being promoted
        @param uid: user id
        '''
        existing = Point.objects.filter(user_id=uid, address=point.address)
        # Primary coordinates are the most frequently observed pair among all
        # raw coordinates recorded for this address.
        coordinates = SecondaryPoint.objects.get(user_id=uid,
                                                 address=point.address).coords
        lon, lat = self.most_frequent_secondary_point(coordinates)
        if existing.exists():
            return existing.update(visit_frequency=point.visit_frequency,
                                   lon=lon, lat=lat)
        else:
            fresh = Point(address=point.address,
                          visit_frequency=point.visit_frequency,
                          user_id=uid, lon=lon, lat=lat, type='UN')
            return fresh.save()

    def save_analysis_info(self, uid):
        '''
        Record the timestamp of the last analysis for this user.
        @param uid: user id
        '''
        date = self._get_todays_date()  # '%Y-%m-%d %H:%M:%S'
        analysis = AnalysisInfo.objects.filter(user_id=uid)
        if analysis:
            analysis.update(analysis_date=date)
            return None
        else:
            return AnalysisInfo(user_id=uid, analysis_date=date).save()

    def get_points(self):
        """Accumulated LocationPoint objects of the current run."""
        return self.__points

    def get_end_points(self):
        """Raw end-point counter (never populated by the current code)."""
        return self.__count_of_end_points

    def get_size_threshold(self):
        """Number of accumulated points."""
        return len(self.__points)

    def point_analysis(self, trips, user_id):
        """Profile the given trips and persist their points; with no trips,
        return an HTML-ish listing of the already stored secondary points."""
        if len(trips) > 0:
            self.trip_point_profiler(trips)
            return self.save_location_points(user_id)
        else:
            return [str(t) + "<br/>"
                    for t in SecondaryPoint.objects.filter(user_id=user_id)]

    def save_location_points(self, user_id):
        '''
        Persist all profiled points as secondary points, promote frequent
        ones to primary points and stamp the analysis timestamp.
        '''
        secondary_points_of_interest = []
        for point in self.get_points():
            res = self.save_secondary_point(point, user_id)
            logger.debug("saved a secondary point %s" % str(res))
            secondary_points_of_interest.append(point)
        points_of_interest = self.get_points_of_interest(user_id)
        if len(points_of_interest) > 0 or len(secondary_points_of_interest) > 0:
            self.save_analysis_info(user_id)
            logger.debug("Saved analysis info secondary points %d, primary points %d" % (len(points_of_interest), len(secondary_points_of_interest)))
        if len(points_of_interest) > 0:
            logger.info("Stored primary points of interest")
            return points_of_interest
        else:
            logger.info("Stored secondary points of interest only")
            return secondary_points_of_interest

    def get_points_of_interest(self, user_id):
        '''
        Promote secondary points whose share of all recorded visits reaches
        the threshold; returns the promoted SecondaryPoint instances.
        '''
        points = SecondaryPoint.objects.filter(user_id=user_id)
        total_visits = sum(p.visit_frequency for p in points)
        if total_visits == 0:
            # Guard: no recorded visits means no shares to compare (the
            # original divided by zero here).
            return []
        points_of_interest = []
        for point in points:
            value = float(point.visit_frequency) / float(total_visits)
            logger.debug("Threshold calc = %.2f(point visitations) / %.2f (total) for %s" % (float(point.visit_frequency), float(total_visits), point.address))
            logger.debug("Threshold equation %.2f (value) >= %.2f (threshold) for %s" % (value, self.__threshold, point.address))
            if value >= self.__threshold:
                self.save_point(point, user_id)
                logger.debug("saved as a primary point %s" % str(point))
                points_of_interest.append(point)
        return points_of_interest

    def find_element(self, element):
        """Index of element inside the accumulated points, or -1."""
        try:
            return self.__points.index(element)
        except ValueError:
            return -1

    def get_first_point(self, coords):
        '''
        Register one use of the point a trip starts from: increment the
        counter of an already known point or append a new one.
        '''
        point = self.check_trip_points(coords)
        if len(point.get_address()) > 0:
            index = self.find_element(point)
            # BUG FIX: the original tested `index > 0`, so a match stored at
            # list position 0 was appended again instead of being counted.
            if index >= 0:
                self.__points[index].add_point()
                self.__points[index].set_coords(point.pop_coords())
            else:
                point.add_point()
                self.__points.append(point)

    def get_last_location(self, feature_array, last):
        """Scan the features backwards from index `last` for the trip's
        final location.

        @return: a one-element wrapper list for a Point feature, the reversed
                 coordinate list for a LineString, or None when nothing
                 usable is found.
        """
        last = last + 1
        wrapper = []
        # range() replaces the py2-only xrange (NameError on Python 3).
        for i in reversed(range(last)):
            tyyppi = feature_array[i]['geometry']['type']
            if tyyppi == "LineString" or tyyppi == "Point":
                coords = feature_array[i]["geometry"]["coordinates"]
                print(tyyppi)
                if len(coords) > 0:
                    if tyyppi == "Point":
                        wrapper.append(coords)
                        print("Wrapped up: %s" % coords)
                        if len(wrapper) > 0:
                            return wrapper
                    return list(reversed(coords))
                else:
                    logger.debug("Skipped empty coords")
            else:
                logger.debug("Skipped one: %s" % tyyppi)

    def get_first_location(self, feature_array, last):
        """Scan the features forward for the trip's first location.

        @return: a one-element wrapper list for a Point feature, the raw
                 coordinate list for a LineString, or None when nothing
                 usable is found before index `last`.
        """
        wrapper = []
        for i in range(0, last):
            tyyppi = feature_array[i]['geometry']['type']
            if tyyppi == "LineString" or tyyppi == "Point":
                print(tyyppi)
                coords = feature_array[i]["geometry"]["coordinates"]
                if tyyppi == "Point":
                    wrapper.append(coords)
                    print("Wrapped up: %s" % coords)
                    if len(wrapper) > 0:
                        return wrapper
                if len(coords) > 0:
                    return coords

    def trip_point_profiler(self, trips):
        """Extract start (key 0) and end (key 1) locations from every trip's
        GeoJSON feature list and feed them into the point accumulator."""
        logger.debug("trips = %d" % len(trips))
        for t in trips:
            endpoints = {}
            # JSON round-trip normalises the stored trip structure.
            items = json.loads(json.dumps(t.trip))
            feature_array = items["features"]
            last = len(feature_array) - 1
            if len(feature_array) > 0:
                if len(feature_array) > 1:
                    node = self.get_first_location(feature_array, last)
                    if node is not None:
                        endpoints[0] = node
                    else:
                        logger.debug("Got None")
                    node = self.get_last_location(feature_array, last)
                    if node is not None:
                        endpoints[1] = node
                    else:
                        logger.debug("Got None")
                else:
                    # Single feature: its coordinates become the start and,
                    # when possible, their reverse becomes the end.
                    coords = feature_array[0]["geometry"]["coordinates"]
                    if len(coords) > 0:
                        endpoints[0] = coords
                        if len(coords) > 1:
                            endpoints[1] = list(reversed(coords))
                    else:
                        logger.debug("Skipped a point, no coordinated %s" % coords)
            for key in endpoints:
                self.get_first_point(endpoints[key])

    def check_trip_points(self, trip):
        '''
        Find the first list-typed coordinate pair in the trip and resolve it
        via reverse geocoding.
        @param trip: array of coordinates for the trip
        '''
        coords = ""
        index = -1
        # BUG FIX: coords/index are initialised so the warning below can no
        # longer raise NameError when `trip` is empty.
        for index, coordinates in enumerate(trip):
            if isinstance(coordinates, list):
                coords = "%s,%s" % (coordinates[0], coordinates[1])
                if index == 0:
                    return self.get_point_information(coords)
        logger.warn("Returning empty start point, check your coordinates %s in index %d" % (coords, index))
        return LocationPoint()

    def get_point_information(self, coordinates):
        '''
        Reverse-geocode one coordinate pair.
        NOTE: coordinates must be in (wgs84) format: latitude,longitude
        @param coordinates: string coordinates
        '''
        reittiopas = ReittiopasAPI()
        return reittiopas.get_reverse_geocode(coordinates)

    def get_count_of_new_trips(self, uid):
        '''
        Count the trips recorded since the last analysis; when no analysis
        exists yet, stamp the first one and count all trips.
        @param uid: user id
        '''
        date_today = self._get_todays_date()
        last_analysis = None
        qs = AnalysisInfo.objects.filter(user_id=uid)
        if qs.exists():
            last_analysis = qs[0]
        logger.debug("search %s - %s" % (date_today, str(last_analysis)))
        if last_analysis:
            trips = Trip.objects.filter(user_id=uid,
                                        started_at__range=[last_analysis.analysis_date, date_today])
        else:
            # First time for everything.
            self.save_analysis_info(uid)
            trips = Trip.objects.filter(user_id=uid)
        return len(trips)

    def _get_todays_date(self):
        """Current local time as a '%Y-%m-%d %H:%M:%S' string."""
        return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
| |
from collections import Counter
import json
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Source directory holding the per-interaction distribution dumps (*.jsons).
INPUT_PATH = "/home/joao/Dropbox/Twitter/Filtered.Distributions/"
# Destination directory for the generated summary .csv/.dat files.
OUTPUT_PATH = "/home/joao/Desktop/"
def get_language_data(csv_path='../data/profile/users_profile_data.csv'):
    """Load per-user language labels and normalise them.

    Regional variants of en/es/pt are collapsed to their base code; anything
    outside the top-10 list is mapped to 'other'.

    @param csv_path: path of the profile CSV (columns: user_id, language);
        parameterised so tests can point at other files -- the default keeps
        the original behaviour.
    @return: dict mapping user_id -> normalised language code
    """
    df = pd.read_csv(csv_path)

    def _normalize(lang):
        # Substring check (not prefix) kept from the original code.
        for base in ('en', 'es', 'pt'):
            if base + '-' in lang:
                return base
        return lang

    # Series.map works on both py2 and py3 (the bare builtin map() assigned
    # to a column returns a lazy iterator on py3).
    df['language'] = df['language'].map(_normalize)
    # BUG FIX: the original list contained the literal 'ar''en-gb', which
    # Python concatenates into the bogus entry 'aren-gb'; 'ar' and 'en-gb'
    # are now two separate entries.
    top10 = ['en', 'es', 'pt', 'fr', 'it', 'de', 'ja', 'ar', 'en-gb', 'id']
    df['language'] = df['language'].map(lambda lang: lang if lang in top10 else 'other')
    return dict(zip(df['user_id'], df['language']))
def get_freqs(values):
    """Normalise `values` to proportions summing to 1, rounded to 5 dp."""
    arr = np.asarray(values, dtype=float)
    return np.round(arr / arr.sum(), 5)
def ecdf(values):
    """Empirical frequency table: maps each value to its occurrence count."""
    frequency_table = Counter()
    frequency_table.update(values)
    return frequency_table
def calc_cdf(Fn, x):
    """Cumulative mass at x: sum of value * count over table keys <= x.

    @param Fn: mapping value -> occurrence count (see ecdf)
    @param x: evaluation point; keys and x are compared after rounding to
        4 decimal places
    @return: 0. when no key qualifies, otherwise the summed mass
    """
    x = round(x, 4)
    # BUG FIX (py3): the original used filter()/map(), which are lazy on
    # Python 3 -- len() raises TypeError on a filter object and np.sum()
    # cannot reduce a map object. List comprehensions work on both versions.
    keys = [f for f in Fn if round(f, 4) <= x]
    if len(keys) == 0:
        return 0.
    return np.sum([p * Fn[p] for p in keys])
def calc_ccdf(Fn, x):
    """Complementary CDF: the mass of Fn strictly above x."""
    cdf_value = calc_cdf(Fn=Fn, x=x)
    return 1 - cdf_value
def calc_entropy(freqs):
    """Shannon entropy normalised by log2(len) so the result lies in [0, 1].

    Guards the two failure modes of the original implementation: a
    single-element input (log2(1) == 0 leads to division by zero) and zero
    frequencies (log2(0) yields nan); 0 * log2(0) is taken as 0 by the
    usual information-theoretic convention.
    """
    freqs = get_freqs(freqs)
    if len(freqs) < 2:
        # One (or zero) outcomes carry no uncertainty.
        return 0.
    positive = freqs[freqs > 0]
    log_size = math.log(len(freqs), 2)
    return - np.sum(positive * np.log2(positive)) / log_size
def calc_top(freqs):
    """Distance of the peak frequency from the mean, in (rounded) standard
    deviations, ceiled; 0 for a perfectly flat distribution."""
    freqs = get_freqs(freqs)
    spread = np.round(np.std(freqs), 4)
    if not spread > 0.:
        return 0.
    peak_offset = (np.max(freqs) - np.mean(freqs)) / spread
    return np.round(np.ceil(peak_offset), 5)
def calc_mean(values):
    """Arithmetic mean rounded to 5 decimal places."""
    avg = np.mean(values)
    return np.round(avg, 5)
def handle_summary(input_file, output_file):
    """Summarise one distributions file into a CSV.

    Each input line is a JSON object {key: [counts...]}. Output columns:
    id, top (peak distance in std units), old (share of mass in the first
    10% of entries), ccdfs (mass above the mean).
    """
    keys = []
    tops = []
    olds = []
    ccdfs = []
    with open(input_file, 'r') as infile:
        for line in infile:
            jd = json.loads(line)
            # BUG FIX (py3): jd.keys()[0] is Python-2-only -- dict views are
            # not indexable on Python 3; next(iter(...)) works on both.
            key = next(iter(jd))
            values = get_freqs(jd[key])
            keys.append(key)
            tops.append(calc_top(freqs=values))
            # Share of the distribution carried by the first 10% of entries.
            size = int(.1 * np.size(values))
            olds.append(sum(values[0:size]) / sum(values))
            ccdfs.append(calc_ccdf(Fn=ecdf(values=values), x=calc_mean(values)))
    df = pd.DataFrame()
    df['id'] = keys
    df['top'] = tops
    df['old'] = olds
    df['ccdfs'] = ccdfs
    df.to_csv(output_file)
def handle_summary2(input_file, output_file, c1=0.0, c2=0.0):
    """Copy distribution lines whose CCDF-at-mean statistic c falls in the
    fidelity band defined by (c1, c2).

    Band rules (kept exactly as originally written):
      * c1 == 0 and c2 == 0: keep lines with c <= 0
      * c1 == 0 and c2 > 0:  keep lines with c1 < c < c2
      * c1 != 0 and c2 != 0: keep lines with c1 <= c < c2
    """
    # BUG FIX (py3): output opened in 'w' instead of 'wb' -- writing str to a
    # binary-mode file raises TypeError on Python 3 (identical on py2/POSIX).
    with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
        for line in infile:
            jd = json.loads(line)
            # BUG FIX (py3): jd.keys()[0] is Python-2-only.
            key = next(iter(jd))
            values = get_freqs(jd[key])
            c = calc_ccdf(Fn=ecdf(values=values), x=calc_mean(values))
            if c1 == 0. and c2 == 0. and c <= c1:
                outfile.write(line)
            elif c1 == 0. and c2 > 0.:
                if c1 < c < c2:
                    outfile.write(line)
            elif c1 != 0. and c2 != 0.:
                if c1 <= c < c2:
                    outfile.write(line)
def handle_summary3(input_file, output_file):
    """Write one '<key> <entropy>' line per input distribution, with the
    normalised entropy clamped to at most 1.0."""
    # BUG FIX (py3): 'w' instead of 'wb' -- writing str to a binary-mode
    # file raises TypeError on Python 3.
    with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
        for line in infile:
            jd = json.loads(line)
            # BUG FIX (py3): jd.keys()[0] is Python-2-only.
            key = next(iter(jd))
            values = get_freqs(jd[key])
            c = round(calc_entropy(values), 5)
            c = c if c <= 1.0 else 1.0
            outfile.write(str(key) + ' ' + str(c) + '\n')
def handle_summary4(input_file, output_file):
    """Write one '<key> <ccdf-at-mean>' line per input distribution, with the
    statistic clamped to be non-negative."""
    # BUG FIX (py3): 'w' instead of 'wb' -- writing str to a binary-mode
    # file raises TypeError on Python 3.
    with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
        for line in infile:
            jd = json.loads(line)
            # BUG FIX (py3): jd.keys()[0] is Python-2-only.
            key = next(iter(jd))
            values = get_freqs(jd[key])
            c = round(calc_ccdf(Fn=ecdf(values=values), x=calc_mean(values)), 5)
            c = c if c >= 0. else 0.0
            outfile.write(str(key) + ' ' + str(c) + '\n')
# The four fidelity bands, ordered from least to most faithful.
fidelidades = ['SemFidelidade', 'BaixaFidelidade', 'Fidelidade', 'AltaFidelidade']
# Template for derived file names: <path><interaction><band><extension>.
inputname = "{0}{1}{2}{3}"
# NOTE(review): everything below runs at import time; presumably this module
# is only ever executed as a script -- confirm before importing it elsewhere.
# Per-interaction summary CSVs (id / top / old / ccdfs columns).
handle_summary(INPUT_PATH + "like.jsons", OUTPUT_PATH + "like.csv")
handle_summary(INPUT_PATH + "mention.jsons", OUTPUT_PATH + "mention.csv")
handle_summary(INPUT_PATH + "retweet.jsons", OUTPUT_PATH + "retweet.csv")
# Split each interaction into fidelity bands by the CCDF-at-mean statistic c:
# AltaFidelidade [.8, 2.), Fidelidade [.2, .8), BaixaFidelidade (0, .2),
# SemFidelidade (defaults: c <= 0).
handle_summary2(INPUT_PATH + "like.jsons", inputname.format(OUTPUT_PATH, 'like', fidelidades[3], '.dat'), .8, 2.)
handle_summary2(INPUT_PATH + "mention.jsons", inputname.format(OUTPUT_PATH, 'mention', fidelidades[3], '.dat'), .8, 2.)
handle_summary2(INPUT_PATH + "retweet.jsons", inputname.format(OUTPUT_PATH, 'retweet', fidelidades[3], '.dat'), .8, 2.)
handle_summary2(INPUT_PATH + "union.jsons", inputname.format(OUTPUT_PATH, 'union', fidelidades[3], '.dat'), .8, 2.)
handle_summary2(INPUT_PATH + "like.jsons", inputname.format(OUTPUT_PATH, 'like', fidelidades[2], '.dat'), .2, .8)
handle_summary2(INPUT_PATH + "mention.jsons", inputname.format(OUTPUT_PATH, 'mention', fidelidades[2], '.dat'), .2, .8)
handle_summary2(INPUT_PATH + "retweet.jsons", inputname.format(OUTPUT_PATH, 'retweet', fidelidades[2], '.dat'), .2, .8)
handle_summary2(INPUT_PATH + "union.jsons", inputname.format(OUTPUT_PATH, 'union', fidelidades[2], '.dat'), .2, .8)
handle_summary2(INPUT_PATH + "like.jsons", inputname.format(OUTPUT_PATH, 'like', fidelidades[1], '.dat'), .0, .2)
handle_summary2(INPUT_PATH + "mention.jsons", inputname.format(OUTPUT_PATH, 'mention', fidelidades[1], '.dat'), .0, .2)
handle_summary2(INPUT_PATH + "retweet.jsons", inputname.format(OUTPUT_PATH, 'retweet', fidelidades[1], '.dat'), .0, .2)
handle_summary2(INPUT_PATH + "union.jsons", inputname.format(OUTPUT_PATH, 'union', fidelidades[1], '.dat'), .0, .2)
handle_summary2(INPUT_PATH + "like.jsons", inputname.format(OUTPUT_PATH, 'like', fidelidades[0], '.dat'))
handle_summary2(INPUT_PATH + "mention.jsons", inputname.format(OUTPUT_PATH, 'mention', fidelidades[0], '.dat'))
handle_summary2(INPUT_PATH + "retweet.jsons", inputname.format(OUTPUT_PATH, 'retweet', fidelidades[0], '.dat'))
handle_summary2(INPUT_PATH + "union.jsons", inputname.format(OUTPUT_PATH, 'union', fidelidades[0], '.dat'))
# Entropy summaries for every band/interaction combination.
for f in fidelidades:
    handle_summary3(inputname.format(OUTPUT_PATH, 'like', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'entropiaLike', f, '.csv'))
    handle_summary3(inputname.format(OUTPUT_PATH, 'mention', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'entropiaMention', f, '.csv'))
    handle_summary3(inputname.format(OUTPUT_PATH, 'retweet', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'entropiaRetweet', f, '.csv'))
    handle_summary3(inputname.format(OUTPUT_PATH, 'union', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'entropiaUnion', f, '.csv'))
# CCDF-at-mean summaries for every band/interaction combination.
for f in fidelidades:
    handle_summary4(inputname.format(OUTPUT_PATH, 'like', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'ccdfLike', f, '.csv'))
    handle_summary4(inputname.format(OUTPUT_PATH, 'mention', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'ccdfMention', f, '.csv'))
    handle_summary4(inputname.format(OUTPUT_PATH, 'retweet', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'ccdfRetweet', f, '.csv'))
    handle_summary4(inputname.format(OUTPUT_PATH, 'union', f, '.dat'),
                    inputname.format(OUTPUT_PATH, 'ccdfUnion', f, '.csv'))
# handle_summary3(OUTPUT_PATH + "like20.jsons", OUTPUT_PATH + "like20.csv")
# handle_summary3(OUTPUT_PATH + "mention20.jsons", OUTPUT_PATH + "mention20.csv")
# handle_summary3(OUTPUT_PATH + "retweet20.jsons", OUTPUT_PATH + "retweet20.csv")
#
#
# handle_summary3(OUTPUT_PATH + "like00.jsons", OUTPUT_PATH + "like00.csv")
# handle_summary3(OUTPUT_PATH + "mention00.jsons", OUTPUT_PATH + "mention00.csv")
# handle_summary3(OUTPUT_PATH + "retweet00.jsons", OUTPUT_PATH + "retweet00.csv")
# df = pd.read_csv(OUTPUT_PATH + "like.csv")
#
# df1 = df.loc[df['old'] >= 0.9]
# df2 = df.loc[df['ccdfs'] >= 0.9]
#
# print len(df1['old'].values) / float(len(df['old'].values)) * 100, len(df2['ccdfs'].values) / float(
# len(df['ccdfs'].values)) * 100
#
# df = pd.read_csv(OUTPUT_PATH + "mention.csv")
#
# df1 = df.loc[df['old'] >= 0.9]
# df2 = df.loc[df['ccdfs'] >= 0.9]
#
# print len(df1['old'].values) / float(len(df['old'].values)) * 100, len(df2['ccdfs'].values) / float(
# len(df['ccdfs'].values)) * 100
#
# df = pd.read_csv(OUTPUT_PATH + "retweet.csv")
#
# df1 = df.loc[df['old'] >= 0.9]
# df2 = df.loc[df['ccdfs'] >= 0.9]
#
# print len(df1['old'].values) / float(len(df['old'].values)) * 100, len(df2['ccdfs'].values) / float(
# len(df['ccdfs'].values)) * 100
# def violin(dfs, filename):
# sns.set(style="whitegrid", palette="muted", color_codes=True)
# plt.plot()
# fig, axes = plt.subplots(ncols=3)
# sns.violinplot(dfs['top'], ax=axes[0], color='b')
# sns.violinplot(dfs['ccdfs'], ax=axes[1], color='r')
# sns.violinplot(dfs['old'], ax=axes[2], color='g')
#
# # format_axes(axes)
#
# plt.tight_layout()
#
# fig.savefig(OUTPUT_PATH + filename) # save the figure to file
# plt.close(fig)
# # violin(df, 'like')
#
# df = pd.read_csv(OUTPUT_PATH + "like.csv")
#
# print '\n', np.percentile(df['top'], [20, 80])
# print np.percentile(df['ccdfs'], [20, 80])
# print np.percentile(df['old'], [20, 80])
#
#
# df = pd.read_csv(OUTPUT_PATH + "mention.csv")
# # violin(df, 'mention')
#
# print '\n', np.percentile(df['top'], [10, 90])
# print np.percentile(df['ccdfs'], [10, 90])
# print np.percentile(df['old'], [10, 90])
#
#
# df = pd.read_csv(OUTPUT_PATH + "retweet.csv")
# # violin(df, 'retweet')
#
# print '\n', np.percentile(df['top'], [10, 90])
# print np.percentile(df['ccdfs'], [10, 90])
# print np.percentile(df['old'], [10, 90])
| |
#!/usr/bin/env python
# CallHap General.py
# By Brendan Kohrn
# 3/20/2017
#
# This script contains general functions for CallHap
import numpy as np
import math
import decimal as dec
def comparePotHaps(potHapSetA, potHapSetB, numInitialHaps):
    '''Return True when two candidate haplotype sets agree element-wise,
    ignoring the first numInitialHaps entries (assumed shared).'''
    # Sets of different lengths can never match.
    if len(potHapSetA) != len(potHapSetB):
        return False
    tail_a = potHapSetA[numInitialHaps:]
    tail_b = potHapSetB[numInitialHaps:]
    return all(np.all(hapA == hapB) for hapA, hapB in zip(tail_a, tail_b))
def average(inList):
    '''Return the arithmetic mean of a list as a float.

    @raise ValueError: when the list is empty. (The original did
        `raise("...")`, which raises a TypeError because a str is not an
        exception class or instance.)
    '''
    if len(inList) == 0:
        raise ValueError("Error in Average: %s" % inList)
    return float(sum(inList)) / len(inList)
def npDecZeros(rows, cols=0):
    '''Create a numpy object array filled with Decimal(0) values.

    @param rows: length of the first dimension
    @param cols: length of the second dimension; 0 (the default) yields a
        1-D array, matching the original behaviour
    @return: numpy array of dtype object whose entries are Decimal(0)

    BUG FIX (py3): the original filled the array with xrange loops, which
    raises NameError on Python 3.
    '''
    shape = rows if cols == 0 else (rows, cols)
    outArray = np.empty(shape, dtype=object)
    # Decimal is immutable, so every cell can safely share one zero object.
    outArray.fill(dec.Decimal(0))
    return outArray
def npToDecNp(inArray):
    '''Convert a numpy array of floats to a numpy array of Decimal numbers
    to avoid rounding errors in later arithmetic.'''
    # dtype=Decimal produces an object array; the elements themselves are
    # still floats until converted below.
    outArray = np.array(inArray, dtype=dec.Decimal)
    for idx in np.ndindex(outArray.shape):
        outArray[idx] = dec.Decimal(outArray[idx])
    return(outArray)
def copy(inArr, elmntType = "int"):
    '''Shallow-copy a list; when elmntType is "nparray", also copy each
    numpy-array element so the copies are independent.

    NOTE: the function name shadows no stdlib name here, but callers must
    not confuse it with ``copy.copy``.
    '''
    if elmntType != "nparray":
        return(list(inArr))
    return([np.copy(element) for element in inArr])
def AIC_from_RSS(RSS, numHaps, numSNPs):
    '''Calculate the Akaike Information Criterion from an RSS value.'''
    # Complexity penalty plus goodness-of-fit term.
    penalty = 2 * numHaps
    fit = numSNPs * math.log10(RSS / numSNPs)
    return(penalty + fit)
def AICc_from_RSS(RSS, numHaps, numSNPs):
    '''Calculate the small-sample corrected AIC (AICc) from an RSS value.'''
    aic = 2 * numHaps + numSNPs * math.log10(RSS / numSNPs)
    # Correction term for finite sample sizes.
    correction = (2 * numHaps * (numHaps + 1)) / (numSNPs - numHaps - 1)
    return(aic + correction)
def invertArray(inArray):
    '''Invert an array of 0s and 1s (such as the Haplotypes array) or an
    array of values between 0 and 1 (such as the SNP Freqs array),
    returning 1 - inArray.'''
    return(np.subtract(1, inArray))
def residuals(inSol, inData, inFreqs, poolSize):
    '''Calculate residuals for one particular least-squares solution of
    Ax=b: the observed frequencies minus the pool-size-scaled row sums.'''
    predicted = np.sum((inSol * inData) / poolSize, axis=1)
    return(inFreqs - predicted)
def ArrayHaps(origHaps, newHaps):
    '''Column-concatenate the original haplotype array with every new
    haplotype array in newHaps.'''
    return(np.concatenate([origHaps] + list(newHaps), axis=1))
def numDiffs(inHap1, inHap2):
    '''Count the positions at which two haplotype arrays differ.

    :param inHap1: numpy array
    :param inHap2: numpy array of the same shape
    :return: number of differing positions as an int
    :raises ValueError: if the two arrays have different shapes

    The original code used a bare ``raise`` with no active exception
    (a RuntimeError at runtime) and a ``xrange``-based Python 2 loop;
    both are replaced with portable equivalents.
    '''
    if inHap1.shape != inHap2.shape:
        raise ValueError("numDiffs: shape mismatch %s vs %s"
                         % (inHap1.shape, inHap2.shape))
    # Vectorized element-wise comparison over the flattened arrays.
    return(int(np.sum(inHap1.ravel() != inHap2.ravel())))
def areEqual(inHap1, inHap2):
    '''True when two haplotype arrays have the same shape and identical
    contents; False otherwise.'''
    # Guard clause: different shapes can never be equal.
    if inHap1.shape != inHap2.shape:
        return(False)
    return(np.all(inHap1 == inHap2))
def FindLegalSnpsByNetwork(inHaps, testHapIdx):
    '''Find the closest neighbours of one haplotype and the SNP positions
    at which it may legally be changed.

    :param inHaps: sequence of equal-length haplotype vectors (0/1 values,
        indexable; numpy arrays in practice)
    :param testHapIdx: index into inHaps of the haplotype under test
    :return: tuple of (index of the single closest haplotype, list of SNP
        indices differing between the test haplotype and any haplotype
        accepted into closestHaps)

    NOTE(review): uses ``xrange`` and therefore only runs under Python 2.
    '''
    closestHaps = []    # indices of haplotypes accepted as "closest"
    closestDiffs = []   # per accepted haplotype: SNP indices that differ
    notClosest = []     # positions where a candidate agreed with the test hap
    numSnps = len(inHaps[0])
    # Distance from the test haplotype to every haplotype (self included,
    # at distance 0), measured in number of differing SNPs
    distances=[numSnps - np.sum(a==inHaps[testHapIdx]) for a in inHaps]
    # Determine the distance between this haplotype and every other haplotype
    # in number of SNPs different
    # Sort by closeness
    distIters = sorted(range(len(distances)), key=lambda x: distances[x])
    # For each haplotype, from closest to furthest away, check if it shares a
    # difference in the target SNP
    # with another haplotype in closestHaps
    for hapIter in distIters:
        if hapIter != testHapIdx:
            if closestHaps == []:
                # If no haplotype is closest yet, this one is the closest
                closestHaps.append(hapIter)
                closestDiffs.append([])
                notClosest.append([])
                # Record every SNP where it differs from the test haplotype
                for x in xrange(numSnps):
                    if inHaps[hapIter][x] == inHaps[testHapIdx][x]:
                        pass
                    else:
                        closestDiffs[-1].append(x)
            else:
                # Otherwise, test to see if this haplotype shares a different
                # SNP with any closer haplotype
                diffBranch = True
                for closeHap in xrange(len(closestHaps)):
                    for difSnp in xrange(len(closestDiffs[closeHap])):
                        tmpPointer = closestDiffs[closeHap][difSnp]
                        if (inHaps[hapIter][tmpPointer] ==
                            inHaps[testHapIdx][tmpPointer]):
                            notClosest[closeHap].append(difSnp)
                        else:
                            # Also differs from the test haplotype at a
                            # position where a closer haplotype differs:
                            # it lies on the same branch, so reject it
                            diffBranch = False
                if diffBranch:
                    closestHaps.append(hapIter)
                    notClosest.append([])
                    closestDiffs.append([])
                    for x in xrange(numSnps):
                        if inHaps[hapIter][x] == inHaps[testHapIdx][x]:
                            pass
                        else:
                            closestDiffs[-1].append(x)
    # Every SNP differing from any accepted closest haplotype may change
    CanChange = []
    for hap in xrange(len(closestHaps)):
        CanChange.extend(closestDiffs[hap])
    return(closestHaps[0],CanChange)
def ValidSnpsFromPhylogeny(inHaps, numInitHaps):
    '''For every haplotype, collect the SNP positions separating it from
    its phylogenetically adjacent haplotypes.

    :param inHaps: sequence of equal-length haplotype vectors
    :param numInitHaps: number of initial (known) haplotypes at the start
        of inHaps; a second pass is run restricted to these
    :return: list with one entry per haplotype, each a list of SNP indices
        considered valid to change

    NOTE(review): uses ``xrange`` and therefore only runs under Python 2.
    '''
    # countDiffs[x][a] is the element-wise difference mask between
    # haplotypes x and a; diffSnps[x][a] lists the indices where they differ
    countDiffs = [[(a!=b) for a in inHaps] for b in inHaps]
    diffSnps = [[[b for b in xrange(len(countDiffs[x][a]))
                  if countDiffs[x][a][b] == True]
                 for a in xrange(len(countDiffs[x]))]
                for x in xrange(len(countDiffs))]
    # Find adjacent haplotypes for each haplotype
    validSnps = []
    nextHaps = []
    for hap in xrange(len(inHaps)):
        nextHaps.append([])
        validSnps.append([])
        # Candidate neighbours are considered from closest to furthest
        minDistOrder = sorted(range(len(inHaps)),
                              key=lambda x: len(diffSnps[hap][x]))
        for hap2 in minDistOrder:
            if hap != hap2:
                if len(nextHaps[-1]) > 0:
                    isAdj = True
                    for closeHap in nextHaps[-1]:
                        # hap2 is not adjacent if it shares a differing SNP
                        # with an already-accepted closer haplotype
                        if len(np.intersect1d(diffSnps[hap][closeHap],
                                              diffSnps[hap][hap2])) != 0:
                            isAdj = False
                            validSnps[-1] = list(np.setdiff1d(validSnps[-1],
                                                 diffSnps[closeHap][hap2]))
                    if isAdj == True:
                        nextHaps[-1].append(hap2)
                        validSnps[-1].extend(diffSnps[hap][hap2])
                else:
                    # The first (closest) haplotype is always adjacent
                    nextHaps[-1].append(hap2)
                    validSnps[-1].extend(diffSnps[hap][hap2])
    # Second pass: the same search restricted to the initial haplotypes
    validSnps2 = []
    nextHaps2 = []
    for hap in xrange(numInitHaps):
        nextHaps2.append([])
        validSnps2.append([])
        minDistOrder = sorted(range(numInitHaps),
                              key=lambda x: len(diffSnps[hap][x]))
        for hap2 in minDistOrder:
            if hap != hap2:
                if len(nextHaps2[-1]) > 0:
                    isAdj = True
                    for closeHap in nextHaps2[-1]:
                        if len(np.intersect1d(diffSnps[hap][closeHap],
                                              diffSnps[hap][hap2])) != 0:
                            isAdj = False
                            # NOTE(review): this mirrors the loop above but
                            # subtracts from validSnps[-1] rather than
                            # validSnps2[-1] -- looks like a copy/paste
                            # slip; confirm intended behavior before
                            # changing it.
                            validSnps2[-1] = list(np.setdiff1d(validSnps[-1],
                                                  diffSnps[closeHap][hap2]))
                    if isAdj == True:
                        nextHaps2[-1].append(hap2)
                        validSnps2[-1].extend(diffSnps[hap][hap2])
                else:
                    nextHaps2[-1].append(hap2)
                    validSnps2[-1].extend(diffSnps[hap][hap2])
    # Merge: any SNP found valid in the initial-haplotype pass is added to
    # the full result if not already present
    for hapID in xrange(len(validSnps2)):
        for iter1 in xrange(len(validSnps2[hapID])):
            if validSnps2[hapID][iter1] not in validSnps[hapID]:
                validSnps[hapID].append(validSnps2[hapID][iter1])
    return(validSnps)
def DecHapToNPHap(decHap):
    '''Convert a decimal-encoded haplotype back into a numpy array of
    Decimal 0/1 values.

    The leading binary digit is dropped (presumably a length-marker
    sentinel bit -- confirm against the encoder).
    '''
    bits = bin(decHap)[2:]
    return(np.array([dec.Decimal(b) for b in bits[1:]]))
| |
"""Support KNX devices."""
import logging
import voluptuous as vol
from xknx import XKNX
from xknx.devices import ActionCallback, DateTime, DateTimeBroadcastType, ExposeSensor
from xknx.dpt import DPTArray, DPTBinary
from xknx.exceptions import XKNXException
from xknx.io import DEFAULT_MCAST_PORT, ConnectionConfig, ConnectionType
from xknx.telegram import AddressFilter, GroupAddress, Telegram
from homeassistant.const import (
CONF_ENTITY_ID,
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.script import Script
# Module-level logger for this integration
_LOGGER = logging.getLogger(__name__)
# Integration domain and the hass.data key under which KNXModule is stored
DOMAIN = "knx"
DATA_KNX = "data_knx"
# configuration.yaml option names for the knx: section
CONF_KNX_CONFIG = "config_file"
CONF_KNX_ROUTING = "routing"
CONF_KNX_TUNNELING = "tunneling"
CONF_KNX_LOCAL_IP = "local_ip"
CONF_KNX_FIRE_EVENT = "fire_event"
CONF_KNX_FIRE_EVENT_FILTER = "fire_event_filter"
CONF_KNX_STATE_UPDATER = "state_updater"
CONF_KNX_RATE_LIMIT = "rate_limit"
CONF_KNX_EXPOSE = "expose"
CONF_KNX_EXPOSE_TYPE = "type"
CONF_KNX_EXPOSE_ATTRIBUTE = "attribute"
CONF_KNX_EXPOSE_DEFAULT = "default"
CONF_KNX_EXPOSE_ADDRESS = "address"
# knx.send service name and its call attributes
SERVICE_KNX_SEND = "send"
SERVICE_KNX_ATTR_ADDRESS = "address"
SERVICE_KNX_ATTR_PAYLOAD = "payload"
# Discovery payload key listing device names for each platform
ATTR_DISCOVER_DEVICES = "devices"
# Schema for a tunneling connection (gateway host is required)
TUNNELING_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_KNX_LOCAL_IP): cv.string,
        vol.Optional(CONF_PORT): cv.port,
    }
)
# Schema for a multicast routing connection
ROUTING_SCHEMA = vol.Schema({vol.Optional(CONF_KNX_LOCAL_IP): cv.string})
# Schema for one entry under knx.expose
EXPOSE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_KNX_EXPOSE_TYPE): cv.string,
        vol.Optional(CONF_ENTITY_ID): cv.entity_id,
        vol.Optional(CONF_KNX_EXPOSE_ATTRIBUTE): cv.string,
        vol.Optional(CONF_KNX_EXPOSE_DEFAULT): cv.match_all,
        vol.Required(CONF_KNX_EXPOSE_ADDRESS): cv.string,
    }
)
# Top-level schema for the knx: block; routing and tunneling are mutually
# exclusive, fire_event and fire_event_filter must appear together
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_KNX_CONFIG): cv.string,
                vol.Exclusive(CONF_KNX_ROUTING, "connection_type"): ROUTING_SCHEMA,
                vol.Exclusive(CONF_KNX_TUNNELING, "connection_type"): TUNNELING_SCHEMA,
                vol.Inclusive(CONF_KNX_FIRE_EVENT, "fire_ev"): cv.boolean,
                vol.Inclusive(CONF_KNX_FIRE_EVENT_FILTER, "fire_ev"): vol.All(
                    cv.ensure_list, [cv.string]
                ),
                vol.Optional(CONF_KNX_STATE_UPDATER, default=True): cv.boolean,
                vol.Optional(CONF_KNX_RATE_LIMIT, default=20): vol.All(
                    vol.Coerce(int), vol.Range(min=1, max=100)
                ),
                vol.Optional(CONF_KNX_EXPOSE): vol.All(cv.ensure_list, [EXPOSE_SCHEMA]),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Schema validating knx.send service calls (single int or list of ints)
SERVICE_KNX_SEND_SCHEMA = vol.Schema(
    {
        vol.Required(SERVICE_KNX_ATTR_ADDRESS): cv.string,
        vol.Required(SERVICE_KNX_ATTR_PAYLOAD): vol.Any(
            cv.positive_int, [cv.positive_int]
        ),
    }
)
async def async_setup(hass, config):
    """Set up the KNX component.

    Creates the KNXModule, registers configured exposures, connects to the
    bus, kicks off platform discovery and registers the knx.send service.
    Returns True even when the initial connection fails; in that case a
    persistent notification is shown instead.
    """
    try:
        hass.data[DATA_KNX] = KNXModule(hass, config)
        hass.data[DATA_KNX].async_create_exposures()
        await hass.data[DATA_KNX].start()
    except XKNXException as ex:
        _LOGGER.warning("Can't connect to KNX interface: %s", ex)
        hass.components.persistent_notification.async_create(
            f"Can't connect to KNX interface: <br><b>{ex}</b>", title="KNX"
        )
    # Forward the discovered devices of each type to its platform
    for component, discovery_type in (
        ("switch", "Switch"),
        ("climate", "Climate"),
        ("cover", "Cover"),
        ("light", "Light"),
        ("sensor", "Sensor"),
        ("binary_sensor", "BinarySensor"),
        ("scene", "Scene"),
        ("notify", "Notification"),
    ):
        found_devices = _get_devices(hass, discovery_type)
        hass.async_create_task(
            discovery.async_load_platform(
                hass, component, DOMAIN, {ATTR_DISCOVER_DEVICES: found_devices}, config
            )
        )
    # Service for sending raw payloads to arbitrary group addresses
    hass.services.async_register(
        DOMAIN,
        SERVICE_KNX_SEND,
        hass.data[DATA_KNX].service_send_to_knx_bus,
        schema=SERVICE_KNX_SEND_SCHEMA,
    )
    return True
def _get_devices(hass, discovery_type):
    """Return the names of all KNX devices whose class matches
    discovery_type (e.g. "Switch", "Light")."""
    return [
        device.name
        for device in hass.data[DATA_KNX].xknx.devices
        if type(device).__name__ == discovery_type
    ]
class KNXModule:
    """Representation of KNX Object.

    Owns the XKNX connection, the configured exposures, and the callbacks
    that bridge KNX telegrams into Home Assistant events and services.
    """
    def __init__(self, hass, config):
        """Initialize of KNX module."""
        self.hass = hass
        self.config = config
        self.connected = False
        self.init_xknx()
        self.register_callbacks()
        self.exposures = []
    def init_xknx(self):
        """Initialize of KNX object."""
        self.xknx = XKNX(
            config=self.config_file(),
            loop=self.hass.loop,
            rate_limit=self.config[DOMAIN][CONF_KNX_RATE_LIMIT],
        )
    async def start(self):
        """Start KNX object. Connect to tunneling or Routing device."""
        connection_config = self.connection_config()
        await self.xknx.start(
            state_updater=self.config[DOMAIN][CONF_KNX_STATE_UPDATER],
            connection_config=connection_config,
        )
        # Disconnect cleanly when Home Assistant shuts down
        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self.stop)
        self.connected = True
    async def stop(self, event):
        """Stop KNX object. Disconnect from tunneling or Routing device."""
        await self.xknx.stop()
    def config_file(self):
        """Resolve and return the full path of xknx.yaml if configured."""
        config_file = self.config[DOMAIN].get(CONF_KNX_CONFIG)
        if not config_file:
            return None
        # Relative paths are resolved against the HA configuration directory
        if not config_file.startswith("/"):
            return self.hass.config.path(config_file)
        return config_file
    def connection_config(self):
        """Return the connection_config.

        Tunneling takes precedence over routing; automatic gateway
        discovery is the fallback when neither is configured.
        """
        if CONF_KNX_TUNNELING in self.config[DOMAIN]:
            return self.connection_config_tunneling()
        if CONF_KNX_ROUTING in self.config[DOMAIN]:
            return self.connection_config_routing()
        return self.connection_config_auto()
    def connection_config_routing(self):
        """Return the connection_config if routing is configured."""
        local_ip = self.config[DOMAIN][CONF_KNX_ROUTING].get(CONF_KNX_LOCAL_IP)
        return ConnectionConfig(
            connection_type=ConnectionType.ROUTING, local_ip=local_ip
        )
    def connection_config_tunneling(self):
        """Return the connection_config if tunneling is configured."""
        gateway_ip = self.config[DOMAIN][CONF_KNX_TUNNELING][CONF_HOST]
        gateway_port = self.config[DOMAIN][CONF_KNX_TUNNELING].get(CONF_PORT)
        local_ip = self.config[DOMAIN][CONF_KNX_TUNNELING].get(CONF_KNX_LOCAL_IP)
        # Fall back to the standard KNXnet/IP port when none is configured
        if gateway_port is None:
            gateway_port = DEFAULT_MCAST_PORT
        return ConnectionConfig(
            connection_type=ConnectionType.TUNNELING,
            gateway_ip=gateway_ip,
            gateway_port=gateway_port,
            local_ip=local_ip,
            auto_reconnect=True,
        )
    def connection_config_auto(self):
        """Return the connection_config if auto is configured."""
        # pylint: disable=no-self-use
        return ConnectionConfig()
    def register_callbacks(self):
        """Register callbacks within XKNX object.

        Only done when fire_event is enabled; received telegrams matching
        the configured address filters are re-fired as knx_event.
        """
        if (
            CONF_KNX_FIRE_EVENT in self.config[DOMAIN]
            and self.config[DOMAIN][CONF_KNX_FIRE_EVENT]
        ):
            address_filters = list(
                map(AddressFilter, self.config[DOMAIN][CONF_KNX_FIRE_EVENT_FILTER])
            )
            self.xknx.telegram_queue.register_telegram_received_cb(
                self.telegram_received_cb, address_filters
            )
    @callback
    def async_create_exposures(self):
        """Create exposures.

        Time-like types become KNXExposeTime broadcasts; everything else
        exposes a Home Assistant entity (or one of its attributes).
        """
        if CONF_KNX_EXPOSE not in self.config[DOMAIN]:
            return
        for to_expose in self.config[DOMAIN][CONF_KNX_EXPOSE]:
            expose_type = to_expose.get(CONF_KNX_EXPOSE_TYPE)
            entity_id = to_expose.get(CONF_ENTITY_ID)
            attribute = to_expose.get(CONF_KNX_EXPOSE_ATTRIBUTE)
            default = to_expose.get(CONF_KNX_EXPOSE_DEFAULT)
            address = to_expose.get(CONF_KNX_EXPOSE_ADDRESS)
            if expose_type in ["time", "date", "datetime"]:
                exposure = KNXExposeTime(self.xknx, expose_type, address)
                exposure.async_register()
                self.exposures.append(exposure)
            else:
                exposure = KNXExposeSensor(
                    self.hass,
                    self.xknx,
                    expose_type,
                    entity_id,
                    attribute,
                    default,
                    address,
                )
                exposure.async_register()
                self.exposures.append(exposure)
    async def telegram_received_cb(self, telegram):
        """Call invoked after a KNX telegram was received."""
        self.hass.bus.async_fire(
            "knx_event",
            {"address": str(telegram.group_address), "data": telegram.payload.value},
        )
        # False signals XKNX to proceed with processing telegrams.
        return False
    async def service_send_to_knx_bus(self, call):
        """Service for sending an arbitrary KNX message to the KNX bus."""
        attr_payload = call.data.get(SERVICE_KNX_ATTR_PAYLOAD)
        attr_address = call.data.get(SERVICE_KNX_ATTR_ADDRESS)
        def calculate_payload(attr_payload):
            """Calculate payload depending on type of attribute.

            A plain int becomes a 6-bit DPTBinary; a list becomes a
            DPTArray of bytes.
            """
            if isinstance(attr_payload, int):
                return DPTBinary(attr_payload)
            return DPTArray(attr_payload)
        payload = calculate_payload(attr_payload)
        address = GroupAddress(attr_address)
        telegram = Telegram()
        telegram.payload = payload
        telegram.group_address = address
        await self.xknx.telegrams.put(telegram)
class KNXAutomation:
    """Wrapper around xknx.devices.ActionCallback object..

    Builds a Home Assistant Script from the given action sequence and
    attaches it to the device's actions so it runs when the hook fires.
    """
    def __init__(self, hass, device, hook, action, counter=1):
        """Initialize Automation class.

        :param device: xknx device whose ``actions`` list is extended
        :param hook: forwarded to ActionCallback (trigger condition)
        :param action: HA script sequence executed by the callback
        :param counter: forwarded to ActionCallback
        """
        self.hass = hass
        self.device = device
        script_name = f"{device.get_name()} turn ON script"
        self.script = Script(hass, action, script_name)
        self.action = ActionCallback(
            hass.data[DATA_KNX].xknx, self.script.async_run, hook=hook, counter=counter
        )
        device.actions.append(self.action)
class KNXExposeTime:
    """Expose a time/date/datetime broadcast to the KNX bus."""
    def __init__(self, xknx, expose_type, address):
        """Store the xknx connection, broadcast type and group address."""
        self.xknx = xknx
        self.type = expose_type
        self.address = address
        self.device = None
    @callback
    def async_register(self):
        """Create the xknx DateTime device and add it to the device list."""
        # The broadcast type enum member is the upper-cased type name.
        broadcast_type = DateTimeBroadcastType[self.type.upper()]
        device = DateTime(
            self.xknx,
            "Time",
            broadcast_type=broadcast_type,
            group_address=self.address,
        )
        self.device = device
        self.xknx.devices.add(device)
class KNXExposeSensor:
    """Object to Expose Home Assistant entity to KNX bus.

    Tracks state changes of one entity (or one of its attributes) and
    forwards the new value to an xknx ExposeSensor device.
    """
    def __init__(self, hass, xknx, expose_type, entity_id, attribute, default, address):
        """Initialize of Expose class.

        :param expose_type: xknx value type, or "binary" for on/off
        :param attribute: optional entity attribute to expose instead of
            the state itself
        :param default: value sent when the tracked value is None
        :param address: KNX group address to expose on
        """
        self.hass = hass
        self.xknx = xknx
        self.type = expose_type
        self.entity_id = entity_id
        self.expose_attribute = attribute
        self.expose_default = default
        self.address = address
        self.device = None
    @callback
    def async_register(self):
        """Register listener."""
        # Device name encodes entity and (optionally) attribute
        if self.expose_attribute is not None:
            _name = self.entity_id + "__" + self.expose_attribute
        else:
            _name = self.entity_id
        self.device = ExposeSensor(
            self.xknx, name=_name, group_address=self.address, value_type=self.type,
        )
        self.xknx.devices.add(self.device)
        # Forward future state changes of the tracked entity to KNX
        async_track_state_change_event(
            self.hass, [self.entity_id], self._async_entity_changed
        )
    async def _async_entity_changed(self, event):
        """Handle entity change."""
        new_state = event.data.get("new_state")
        if new_state is None:
            return
        # Ignore transient unknown/unavailable states
        if new_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE):
            return
        if self.expose_attribute is not None:
            new_attribute = new_state.attributes.get(self.expose_attribute)
            old_state = event.data.get("old_state")
            if old_state is not None:
                old_attribute = old_state.attributes.get(self.expose_attribute)
                if old_attribute == new_attribute:
                    # don't send same value sequentially
                    return
            await self._async_set_knx_value(new_attribute)
        else:
            await self._async_set_knx_value(new_state.state)
    async def _async_set_knx_value(self, value):
        """Set new value on xknx ExposeSensor."""
        if value is None:
            # Substitute the configured default; skip entirely if none set
            if self.expose_default is None:
                return
            value = self.expose_default
        if self.type == "binary":
            # Map HA on/off states to booleans for binary exposures
            if value == STATE_ON:
                value = True
            elif value == STATE_OFF:
                value = False
        await self.device.set(value)
| |
# file openpyxl/workbook.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Workbook is the top-level container for all document information."""
__docformat__ = "restructuredtext en"
# Python stdlib imports
import datetime
import os
import threading
# package imports
from openpyxl.worksheet import Worksheet
from openpyxl.writer.dump_worksheet import DumpWorksheet, save_dump
from openpyxl.writer.strings import StringTableBuilder
from openpyxl.namedrange import NamedRange
from openpyxl.style import Style
from openpyxl.writer.excel import save_workbook
from openpyxl.shared.exc import ReadOnlyWorkbookException
from openpyxl.shared.date_time import CALENDAR_WINDOWS_1900, CALENDAR_MAC_1904
from openpyxl.shared.xmltools import fromstring
from openpyxl.shared.ooxml import NAMESPACES, SHEET_MAIN_NS
class DocumentProperties(object):
    """High-level properties of the document."""
    def __init__(self):
        # Static string defaults, applied in one pass.
        defaults = {
            'creator': 'Unknown',
            'title': 'Untitled',
            'subject': '',
            'description': '',
            'keywords': '',
            'category': '',
            'company': 'Microsoft Corporation',
            'excel_base_date': CALENDAR_WINDOWS_1900,
        }
        for attribute, value in defaults.items():
            setattr(self, attribute, value)
        self.last_modified_by = self.creator
        # Timestamps default to the moment of construction.
        self.created = datetime.datetime.now()
        self.modified = datetime.datetime.now()
class DocumentSecurity(object):
    """Security information about the document."""
    def __init__(self):
        # Nothing is locked and no passwords are set by default.
        for flag in ('lock_revision', 'lock_structure', 'lock_windows'):
            setattr(self, flag, False)
        self.revision_password = ''
        self.workbook_password = ''
class Workbook(object):
    """Workbook is the container for all other parts of the document.

    It owns the worksheets, named ranges, document properties, security
    settings and the shared string table used when writing.
    """
    def __init__(self, optimized_write=False, encoding='utf-8',
                 worksheet_class=Worksheet,
                 optimized_worksheet_class=DumpWorksheet,
                 guess_types=True):
        """Create a workbook.

        :param optimized_write: use write-only (dump) worksheets; no
            initial worksheet is created in this mode
        :param encoding: text encoding used when writing
        :param worksheet_class: class used for regular worksheets
        :param optimized_worksheet_class: class used in optimized mode
        :param guess_types: whether cell value types are guessed on
            assignment
        """
        self.worksheets = []
        self._active_sheet_index = 0
        self._named_ranges = []
        self.properties = DocumentProperties()
        self.style = Style()
        self.security = DocumentSecurity()
        self.__optimized_write = optimized_write
        self.__optimized_read = False
        self.__thread_local_data = threading.local()
        self.strings_table_builder = StringTableBuilder()
        self.loaded_theme = None
        self._worksheet_class = worksheet_class
        self._optimized_worksheet_class = optimized_worksheet_class
        self.vba_archive = None
        self.style_properties = None
        self._guess_types = guess_types
        self.encoding = encoding
        # A regular workbook always starts with one empty worksheet
        if not optimized_write:
            self.worksheets.append(self._worksheet_class(parent_workbook=self))
    def read_workbook_settings(self, xml_source):
        """Read the active-sheet index out of workbook.xml source.

        NOTE(review): ``root.find`` may return None when no workbookView
        element exists, which would raise AttributeError below -- confirm
        inputs always contain one.
        """
        root = fromstring(xml_source)
        view = root.find('*/' '{%s}workbookView' % SHEET_MAIN_NS)
        if 'activeTab' in view.attrib:
            self._active_sheet_index = int(view.attrib['activeTab'])
    @property
    def _local_data(self):
        # Thread-local storage shared with the writer code
        return self.__thread_local_data
    @property
    def excel_base_date(self):
        # Calendar system (Windows 1900 / Mac 1904) used for date serials
        return self.properties.excel_base_date
    def _set_optimized_read(self):
        # Mark this workbook as loaded via the optimized (read-only) reader
        self.__optimized_read = True
    def get_active_sheet(self):
        """Returns the current active sheet."""
        return self.worksheets[self._active_sheet_index]
    def create_sheet(self, index=None, title=None):
        """Create a worksheet (at an optional index).
        :param index: optional position at which the sheet will be inserted
        :type index: int
        :param title: optional title for the new sheet
        :raise ReadOnlyWorkbookException: when the workbook is read-only
        """
        if self.__optimized_read:
            raise ReadOnlyWorkbookException('Cannot create new sheet in a read-only workbook')
        if self.__optimized_write :
            new_ws = self._optimized_worksheet_class(
                parent_workbook=self, title=title)
        else:
            if title is not None:
                new_ws = self._worksheet_class(
                    parent_workbook=self, title=title)
            else:
                new_ws = self._worksheet_class(parent_workbook=self)
        self.add_sheet(worksheet=new_ws, index=index)
        return new_ws
    def add_sheet(self, worksheet, index=None):
        """Add an existing worksheet (at an optional index; appends by default)."""
        assert isinstance(worksheet, self._worksheet_class), "The parameter you have given is not of the type '%s'" % self._worksheet_class.__name__
        if index is None:
            index = len(self.worksheets)
        self.worksheets.insert(index, worksheet)
    def remove_sheet(self, worksheet):
        """Remove a worksheet from this workbook."""
        self.worksheets.remove(worksheet)
    def get_sheet_by_name(self, name):
        """Returns a worksheet by its name.
        Returns None if no worksheet has the name specified.
        :param name: the name of the worksheet to look for
        :type name: string
        """
        requested_sheet = None
        for sheet in self.worksheets:
            if sheet.title == name:
                requested_sheet = sheet
                break
        return requested_sheet
    def get_index(self, worksheet):
        """Return the index of the worksheet."""
        return self.worksheets.index(worksheet)
    def get_sheet_names(self):
        """Returns the list of the names of worksheets in the workbook.
        Names are returned in the worksheets order.
        :rtype: list of strings
        """
        return [s.title for s in self.worksheets]
    def create_named_range(self, name, worksheet, range, scope=None):
        """Create a new named_range on a worksheet"""
        assert isinstance(worksheet, self._worksheet_class)
        named_range = NamedRange(name, [(worksheet, range)], scope)
        self.add_named_range(named_range)
    def get_named_ranges(self):
        """Return all named ranges"""
        return self._named_ranges
    def add_named_range(self, named_range):
        """Add an existing named_range to the list of named_ranges."""
        self._named_ranges.append(named_range)
    def get_named_range(self, name):
        """Return the range specified by name, or None if absent."""
        requested_range = None
        for named_range in self._named_ranges:
            if named_range.name == name:
                requested_range = named_range
                break
        return requested_range
    def remove_named_range(self, named_range):
        """Remove a named_range from this workbook."""
        self._named_ranges.remove(named_range)
    def save(self, filename):
        """Save the current workbook under the given `filename`.
        Use this function instead of using an `ExcelWriter`.
        .. warning::
            When creating your workbook using `optimized_write` set to True,
            you will only be able to call this function once. Subsequents attempts to
            modify or save the file will raise an :class:`openpyxl.shared.exc.WorkbookAlreadySaved` exception.
        """
        if self.__optimized_write:
            save_dump(self, filename)
        else:
            save_workbook(self, filename)
| |
"""
These set of functions help the algorithms of MSAF to read and write files
of the Segmentation Dataset.
"""
from collections import Counter
import datetime
import glob
import json
import logging
import numpy as np
import os
from threading import Thread
# Local stuff
import msaf
from msaf import jams2
from msaf import utils
class FileStruct:
    def __init__(self, audio_file):
        """Creates the entire file structure given the audio file."""
        self.ds_path = os.path.dirname(os.path.dirname(audio_file))
        self.audio_file = audio_file
        # Derive the per-track estimation/feature/reference file paths
        # inside the dataset directory tree.
        self.est_file = self._get_dataset_file(
            msaf.Dataset.estimations_dir, msaf.Dataset.estimations_ext)
        self.features_file = self._get_dataset_file(
            msaf.Dataset.features_dir, msaf.Dataset.features_ext)
        self.ref_file = self._get_dataset_file(
            msaf.Dataset.references_dir, msaf.Dataset.references_ext)
    def _get_dataset_file(self, dir, ext):
        """Gets the desired dataset file by swapping the audio extension."""
        original_ext = "." + self.audio_file.split(".")[-1]
        file_name = os.path.basename(self.audio_file).replace(
            original_ext, ext)
        return os.path.join(self.ds_path, dir, file_name)
    def __repr__(self):
        """Prints the file structure."""
        return "FileStruct(\n\tds_path=%s,\n\taudio_file=%s,\n\test_file=%s," \
            "\n\tfeatures_file=%s,\n\tref_file=%s\n)" % (
                self.ds_path, self.audio_file, self.est_file,
                self.features_file, self.ref_file)
def has_same_parameters(est_params, boundaries_id, labels_id, params):
    """Checks whether the parameters in params are the same as the estimated
    parameters in est_params.

    Returns True only when every key in params matches est_params AND the
    boundary/label algorithm identifiers agree (an empty params dict
    therefore always matches).
    """
    matches = 0
    for key, value in params.items():
        if (key in est_params
                and est_params[key] == value
                and est_params["boundaries_id"] == boundaries_id
                and ((labels_id is None and est_params["labels_id"] is None)
                     or (est_params["labels_id"] == labels_id))):
            matches += 1
    return matches == len(params)
def find_estimation(all_estimations, boundaries_id, labels_id, params,
                    est_file):
    """Finds the correct estimation from all the estimations contained in a
    JAMS file given the specified arguments.

    Parameters
    ----------
    all_estimations : list
        List of section Range Annotations from a JAMS file.
    boundaries_id : str
        Identifier of the algorithm used to compute the boundaries.
    labels_id : str
        Identifier of the algorithm used to compute the labels.
    params : dict
        Additional search parameters. E.g. {"feature" : "hpcp"}.
    est_file : str
        Path to the estimated file (JAMS file).

    Returns
    -------
    correct_est : RangeAnnotation
        Correct estimation found (the last one when several match; a
        warning is logged for every additional match). None if not found.
    correct_i : int
        Index of the estimation in the all_estimations list (-1 if none).
    """
    correct_est = None
    correct_i = -1
    matched_before = False
    for idx, estimation in enumerate(all_estimations):
        if not has_same_parameters(estimation.sandbox, boundaries_id,
                                   labels_id, params):
            continue
        if matched_before:
            logging.warning("Multiple estimations match your parameters in "
                            "file %s" % est_file)
        matched_before = True
        correct_est = estimation
        correct_i = idx
    return correct_est, correct_i
def read_estimations(est_file, boundaries_id, labels_id=None, **params):
    """Reads the estimations (boundaries and/or labels) from a jams file
    containing the estimations of an algorithm.

    Parameters
    ----------
    est_file : str
        Path to the estimated file (JAMS file).
    boundaries_id : str
        Identifier of the algorithm used to compute the boundaries.
    labels_id : str
        Identifier of the algorithm used to compute the labels.
    params : dict
        Additional search parameters. E.g. {"feature" : "hpcp"}.

    Returns
    -------
    boundaries : np.array((N,2))
        Array containing the estimated boundaries in intervals.
        (A list of such arrays when several annotation levels exist.)
    labels : np.array(N)
        Array containing the estimated labels.
        Empty array if labels_id is None.
    """
    # Open file and read jams.  The original bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit; narrow it to Exception.
    try:
        jam = jams2.load(est_file)
    except Exception:
        logging.error("Could not open JAMS file %s" % est_file)
        return np.array([]), np.array([])
    # Get all the estimations for the sections
    all_estimations = jam.sections
    # Find correct estimation
    correct_est, i = find_estimation(all_estimations, boundaries_id, labels_id,
                                     params, est_file)
    if correct_est is None:
        logging.error("Could not find estimation in %s" % est_file)
        return np.array([]), np.array([])
    # Retrieve unique levels of segmentation (the loop variable is named
    # ``section`` instead of the original ``range``, which shadowed the
    # builtin).
    levels = list(set(section.label.context for section in correct_est.data))
    # Retrieve data per level
    all_boundaries = []
    all_labels = []
    for level in levels:
        boundaries = []
        labels = []
        for section in correct_est.data:
            if level == section.label.context:
                boundaries.append([section.start.value, section.end.value])
                if labels_id is not None:
                    labels.append(section.label.value)
        all_boundaries.append(np.asarray(boundaries))
        all_labels.append(np.asarray(labels, dtype=int))
    # If there is only one level, return np.arrays instead of lists
    if len(levels) == 1:
        all_boundaries = all_boundaries[0]
        all_labels = all_labels[0]
    return all_boundaries, all_labels
def get_algo_ids(est_file):
    """Gets the algorithm ids that are contained in the est_file."""
    with open(est_file, "r") as handle:
        est_data = json.load(handle)
    # The ids are the keys of the "boundaries" section.
    return est_data["boundaries"].keys()
def read_references(audio_path, annotator_id=0):
    """Reads the boundary times and the labels.

    Parameters
    ----------
    audio_path : str
        Path to the audio file
    annotator_id : int
        Index of the annotator whose references to load.

    Returns
    -------
    ref_times : list
        List of boundary times
    ref_labels : list
        List of labels

    Fixes over the original: the bare ``except:`` is narrowed to
    ``Exception``, and the failure path now returns a two-element tuple
    (the original returned a single ``[]``, which made every caller's
    two-value unpack raise ValueError).
    """
    # Dataset path
    ds_path = os.path.dirname(os.path.dirname(audio_path))
    # Read references
    jam_path = os.path.join(ds_path, msaf.Dataset.references_dir,
                            os.path.basename(audio_path)[:-4] +
                            msaf.Dataset.references_ext)
    ds_prefix = os.path.basename(audio_path).split("_")[0]
    # Get context from the dataset prefix, defaulting to "function"
    context = msaf.prefix_dict.get(ds_prefix, "function")
    try:
        ref_inters, ref_labels = jams2.converters.load_jams_range(
            jam_path, "sections", context=context, annotator=annotator_id)
    except Exception:
        logging.warning("Reference not found in %s" % jam_path)
        return [], []
    # Intervals to times
    ref_times = utils.intervals_to_times(ref_inters)
    return ref_times, ref_labels
def read_ref_labels(audio_path):
    """Reads the annotated labels from the given audio path."""
    # Boundary times are discarded; only the labels are needed here.
    _, ref_labels = read_references(audio_path)
    return ref_labels
def read_ref_int_labels(audio_path):
    """Reads the annotated labels using unique integers as identifiers
    instead of strings.

    Integer ids are assigned in order of first appearance, starting at 1.
    """
    label_ids = {}
    int_labels = []
    next_id = 1
    for label in read_ref_labels(audio_path):
        if label not in label_ids:
            label_ids[label] = next_id
            next_id += 1
        int_labels.append(label_ids[label])
    return int_labels
def align_times(times, frames):
    """Aligns the times to the closest frame times (e.g. beats).

    :param times: 1-D array of times in seconds
    :param frames: 1-D array of frame/beat times in seconds
    :return: sorted array of unique frame indices

    NOTE(review): ``np.minimum.outer`` yields the element-wise minimum of
    each (time, frame) pair rather than a distance, and argmax of the
    zero-clamped result is an unusual way to pick a "closest" frame (one
    would expect e.g. ``np.subtract.outer`` with an argmin of absolute
    differences).  Confirm against callers before relying on exact
    closest-frame semantics.
    """
    dist = np.minimum.outer(times, frames)
    bound_frames = np.argmax(np.maximum(0, dist), axis=1)
    return np.unique(bound_frames)
def read_ref_bound_frames(audio_path, beats):
    """Reads the corresponding references file to retrieve the boundaries
    in frames."""
    ref_times, _ = read_references(audio_path)
    # Snap the annotated boundary times onto the beat grid.
    return align_times(ref_times, beats)
def get_features(audio_path, annot_beats=False, framesync=False,
                 pre_features=None):
    """
    Gets the features of an audio file given the audio_path.
    Parameters
    ----------
    audio_path: str
        Path to the audio file.
    annot_beats: bool
        Whether to use annotated beats or not.
    framesync: bool
        Whether to use framesync features or not.
    pre_features: dict
        Pre computed features as a dictionary.
        `None` for reading them from the json file.
    Return
    ------
    C: np.array((N, 12))
        (Beat-sync) Chromagram
    M: np.array((N, 13))
        (Beat-sync) MFCC
    T: np.array((N, 6))
        (Beat-sync) Tonnetz
    cqt: np.array((N, msaf.Anal.cqt_bins))
        (Beat-sync) Constant-Q transform
    beats: np.array(T)
        Beats in seconds
    dur: float
        Song duration
    analysis : dict
        Parameters of analysis of track (e.g. sampling rate)
    """
    if pre_features is None:
        # Dataset path
        ds_path = os.path.dirname(os.path.dirname(audio_path))
        # Read Estimations
        features_path = os.path.join(ds_path, msaf.Dataset.features_dir,
            os.path.basename(audio_path)[:-4] + msaf.Dataset.features_ext)
        with open(features_path, "r") as f:
            feats = json.load(f)
        # Beat Synchronous Feats
        if framesync:
            feat_str = "framesync"
            beats = None
        else:
            if annot_beats:
                # Read references
                # NOTE(review): bare ``except`` below also catches
                # KeyboardInterrupt/SystemExit, and ``annotation_path`` is
                # referenced in the error message even when its own
                # construction failed -- consider narrowing.
                try:
                    annotation_path = os.path.join(
                        ds_path, msaf.Dataset.references_dir,
                        os.path.basename(audio_path)[:-4] +
                        msaf.Dataset.references_ext)
                    jam = jams2.load(annotation_path)
                except:
                    raise RuntimeError("No references found in file %s" %
                                       annotation_path)
                feat_str = "ann_beatsync"
                beats = []
                beat_data = jam.beats[0].data
                # NOTE(review): raised without a message; callers only see
                # a bare ValueError when the annotation has no beats.
                if beat_data == []:
                    raise ValueError
                for data in beat_data:
                    beats.append(data.time.value)
                beats = np.unique(beats)
            else:
                feat_str = "est_beatsync"
                beats = np.asarray(feats["beats"]["times"])
        # Select the chosen synchronization variant of each feature
        C = np.asarray(feats[feat_str]["hpcp"])
        M = np.asarray(feats[feat_str]["mfcc"])
        T = np.asarray(feats[feat_str]["tonnetz"])
        cqt = np.asarray(feats[feat_str]["cqt"])
        analysis = feats["analysis"]
        dur = analysis["dur"]
        # Frame times might be shorter than the actual number of features.
        if framesync:
            frame_times = utils.get_time_frames(dur, analysis)
            C = C[:len(frame_times)]
            M = M[:len(frame_times)]
            T = T[:len(frame_times)]
    else:
        # Features were precomputed; beat-synced keys carry a "bs_" prefix
        feat_prefix = ""
        if not framesync:
            feat_prefix = "bs_"
        C = pre_features["%shpcp" % feat_prefix]
        M = pre_features["%smfcc" % feat_prefix]
        T = pre_features["%stonnetz" % feat_prefix]
        cqt = pre_features["%scqt" % feat_prefix]
        beats = pre_features["beats"]
        dur = pre_features["anal"]["dur"]
        analysis = pre_features["anal"]
    return C, M, T, cqt, beats, dur, analysis
def safe_write(jam, out_file):
    """Serialize ``jam`` to ``out_file`` as indented JSON.

    This is supposed to be called in a separate thread in order to
    avoid interruptions that would corrupt the file.

    Parameters
    ----------
    jam : object
        JSON-serializable JAMS data to write.
    out_file : str
        Path to the output file.
    """
    # Use a context manager: the original try/finally referenced `f` in
    # the finally clause, which raised UnboundLocalError (masking the real
    # exception) whenever open() itself failed.
    with open(out_file, "w") as f:
        json.dump(jam, f, indent=2)
def save_estimations(out_file, times, labels, boundaries_id, labels_id,
                     **params):
    """Saves the segment estimations in a JAMS file.

    Parameters
    ----------
    out_file : str
        Path to the output JAMS file in which to save the estimations.
    times : np.array or list
        Estimated boundary times.
        If `list`, estimated hierarchical boundaries.
    labels : np.array(N, 2)
        Estimated labels (None in case we are only storing boundary
        evaluations).
    boundaries_id : str
        Boundary algorithm identifier.
    labels_id : str
        Labels algorithm identifier.
    params : dict
        Dictionary with additional parameters for both algorithms.
    """
    # Remove features if they exist (they are not serializable metadata).
    params.pop("features", None)

    # Convert to intervals and sanity check.
    if isinstance(times, np.ndarray):
        # Flat segmentation: a single level of boundaries and labels.
        inters = utils.times_to_intervals(times)
        assert len(inters) == len(labels), "Number of boundary intervals " \
            "(%d) and labels (%d) do not match" % (len(inters), len(labels))
        # Put into lists to simplify the writing process later.
        inters = [inters]
        labels = [labels]
    else:
        # Hierarchical segmentation: one set of intervals per level.
        inters = []
        for level in range(len(times)):
            est_inters = utils.times_to_intervals(times[level])
            inters.append(est_inters)
            assert len(inters[level]) == len(labels[level]), \
                "Number of boundary intervals (%d) and labels (%d) do not match" % \
                (len(inters[level]), len(labels[level]))

    curr_estimation = None
    curr_i = -1

    # Find estimation in file, or create a new JAMS container.
    if os.path.isfile(out_file):
        jam = jams2.load(out_file)
        all_estimations = jam.sections
        curr_estimation, curr_i = find_estimation(
            all_estimations, boundaries_id, labels_id, params, out_file)
    else:
        # Create new JAMS if it doesn't exist.
        jam = jams2.Jams()
        jam.metadata.title = os.path.basename(out_file).replace(
            msaf.Dataset.estimations_ext, "")

    # Create new annotation if needed.
    if curr_estimation is None:
        curr_estimation = jam.sections.create_annotation()

    # Save metadata and parameters.
    curr_estimation.annotation_metadata.attribute = "sections"
    curr_estimation.annotation_metadata.version = msaf.__version__
    curr_estimation.annotation_metadata.origin = "MSAF"
    sandbox = {}
    sandbox["boundaries_id"] = boundaries_id
    sandbox["labels_id"] = labels_id
    sandbox["timestamp"] = \
        datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S")
    for key in params:
        sandbox[key] = params[key]
    curr_estimation.sandbox = sandbox

    # Save actual data.
    curr_estimation.data = []
    for i, (level_inters, level_labels) in enumerate(zip(inters, labels)):
        if level_labels is None:
            # Bug fix: the placeholder labels must be bound to
            # `level_labels` (and sized by the level's intervals); the
            # original assigned a throwaway `label` variable, so the zip
            # below crashed with TypeError whenever labels were None.
            level_labels = np.ones(len(level_inters)) * -1
        for bound_inter, label in zip(level_inters, level_labels):
            segment = curr_estimation.create_datapoint()
            segment.start.value = float(bound_inter[0])
            segment.start.confidence = 0.0
            segment.end.value = float(bound_inter[1])
            segment.end.confidence = 0.0
            segment.label.value = int(label)
            segment.label.confidence = 0.0
            segment.label.context = "level_%d" % i

    # Place estimation in its place.
    if curr_i != -1:
        jam.sections[curr_i] = curr_estimation

    # Write file in a thread and do not let users interrupt it.
    my_thread = Thread(target=safe_write, args=(jam, out_file,))
    my_thread.start()
    my_thread.join()
def get_all_est_boundaries(est_file, annot_beats, algo_ids=None,
                           annotator_id=0):
    """Gets all the estimated boundaries for all the algorithms.

    Parameters
    ----------
    est_file: str
        Path to the estimated file (JSON file)
    annot_beats: bool
        Whether to use the annotated beats or not.
    algo_ids : list
        List of algorithm ids to to read boundaries from.
        If None, all algorithm ids are read.
    annotator_id : int
        Identifier of the annotator.

    Returns
    -------
    all_boundaries: list
        A list of np.arrays containing the times of the boundaries, one array
        for each algorithm (the ground truth comes first).
    """
    # Ground-truth boundaries come from the matching reference JAMS file.
    ref_file = os.path.dirname(est_file) + "/../references/" + \
        os.path.basename(est_file).replace("json", "jams")
    dataset_prefix = os.path.basename(est_file).split("_")[0]
    gt_inters, _ = jams2.converters.load_jams_range(
        ref_file, "sections", context=msaf.prefix_dict[dataset_prefix],
        annotator=annotator_id)
    all_boundaries = [utils.intervals_to_times(gt_inters)]

    # Then one boundary array per requested estimation algorithm.
    if algo_ids is None:
        algo_ids = get_algo_ids(est_file)
    for algo_id in algo_ids:
        inter_estimates = read_estimations(est_file, algo_id, annot_beats,
                                           feature=msaf.feat_dict[algo_id])
        if len(inter_estimates) == 0:
            logging.warning("no estimations for algorithm: %s" % algo_id)
            continue
        all_boundaries.append(utils.intervals_to_times(inter_estimates))
    return all_boundaries
def get_all_est_labels(est_file, annot_beats, algo_ids=None, annotator_id=0):
    """Gets all the estimated labels for all the algorithms.

    Parameters
    ----------
    est_file: str
        Path to the estimated file (JSON file)
    annot_beats: bool
        Whether to use the annotated beats or not.
    algo_ids : list
        List of algorithm ids to to read boundaries from.
        If None, all algorithm ids are read.
    annotator_id : int
        Identifier of the annotator.

    Returns
    -------
    gt_times: np.array
        Ground Truth boundaries in times.
    all_labels: list
        A list of np.arrays containing the labels corresponding to the ground
        truth boundaries (ground truth labels come first).
    """
    # Ground-truth boundaries and labels come from the reference JAMS file.
    ref_file = os.path.dirname(est_file) + "/../" + \
        msaf.Dataset.references_dir + "/" + \
        os.path.basename(est_file).replace("json", "jams")
    dataset_prefix = os.path.basename(est_file).split("_")[0]
    gt_inters, gt_labels = jams2.converters.load_jams_range(
        ref_file, "sections", context=msaf.prefix_dict[dataset_prefix],
        annotator=annotator_id)
    gt_times = utils.intervals_to_times(gt_inters)
    all_labels = [gt_labels]

    # Then one label array per requested estimation algorithm.
    if algo_ids is None:
        algo_ids = get_algo_ids(est_file)
    for algo_id in algo_ids:
        label_estimates = read_estimations(est_file, algo_id, annot_beats,
                                           annot_bounds=True, bounds=False,
                                           feature=msaf.feat_dict[algo_id])
        if len(label_estimates) == 0:
            logging.warning("no estimations for algorithm: %s" % algo_id)
            continue
        all_labels.append(label_estimates)
    return gt_times, all_labels
def get_all_boundary_algorithms():
    """Gets all the possible boundary algorithms in MSAF.

    Returns
    -------
    algo_ids : list
        List of all the IDs of boundary algorithms (strings).
    """
    algo_ids = []
    for name in msaf.algorithms.__all__:
        # getattr replaces the original eval() call: each entry of
        # __all__ is a submodule attribute of msaf.algorithms, so this is
        # equivalent, faster, and avoids evaluating constructed strings.
        module = getattr(msaf.algorithms, name)
        if module.is_boundary_type:
            algo_ids.append(module.algo_id)
    return algo_ids
def get_all_label_algorithms():
    """Gets all the possible label (structural grouping) algorithms in MSAF.

    Returns
    -------
    algo_ids : list
        List of all the IDs of label algorithms (strings).
    """
    algo_ids = []
    for name in msaf.algorithms.__all__:
        # getattr replaces the original eval() call: each entry of
        # __all__ is a submodule attribute of msaf.algorithms, so this is
        # equivalent, faster, and avoids evaluating constructed strings.
        module = getattr(msaf.algorithms, name)
        if module.is_label_type:
            algo_ids.append(module.algo_id)
    return algo_ids
def get_configuration(feature, annot_beats, framesync, boundaries_id,
                      labels_id):
    """Gets the configuration dictionary from the current parameters of the
    algorithms to be evaluated.

    Parameters
    ----------
    feature : str
        Identifier of the feature to use (e.g. "hpcp").
    annot_beats : bool
        Whether to use annotated beats.
    framesync : bool
        Whether to use framewise (instead of beat-synchronous) features.
    boundaries_id : str
        Identifier of the boundaries algorithm ("gt" for ground truth).
    labels_id : str or None
        Identifier of the labels algorithm (None to skip labeling config).

    Returns
    -------
    config : dict
        Combined configuration for the selected algorithms.
    """
    config = {
        "annot_beats": annot_beats,
        "feature": feature,
        "framesync": framesync,
    }
    # Ground-truth boundaries carry no algorithm config to merge.
    if boundaries_id != "gt":
        # getattr replaces eval(): the id names a submodule of
        # msaf.algorithms whose module-level `config` dict we merge in.
        config.update(getattr(msaf.algorithms, boundaries_id).config)
    if labels_id is not None:
        config.update(getattr(msaf.algorithms, labels_id).config)
    return config
def filter_by_artist(file_structs, artist_name="The Beatles"):
    """Filters data set files by artist name."""
    # Keep only the files whose reference JAMS metadata matches the artist.
    return [file_struct for file_struct in file_structs
            if jams2.load(file_struct.ref_file).metadata.artist == artist_name]
def get_SALAMI_internet(file_structs):
    """Gets the SALAMI Internet subset from SALAMI (bit of a hack...)"""
    subset = []
    for file_struct in file_structs:
        # The track number is embedded in the estimation file name:
        # <prefix>_<num>.<ext>
        track_num = int(
            os.path.basename(file_struct.est_file).split("_")[1].split(".")[0])
        # The Internet subset occupies this fixed track-number range.
        if 956 <= track_num <= 1498:
            subset.append(file_struct)
    return subset
def get_dataset_files(in_path, ds_name="*"):
    """Gets the files of the dataset with a prefix of ds_name."""
    # Map the public dataset name to the file-name prefix used on disk.
    ds_dict = {
        "Beatles"   : "Isophonics",
        "Cerulean"  : "Cerulean",
        "Epiphyte"  : "Epiphyte",
        "Isophonics": "Isophonics",
        "SALAMI"    : "SALAMI",
        "SALAMI-i"  : "SALAMI",
        "*"         : "*"
    }
    if ds_name not in ds_dict:
        raise RuntimeError("Dataset %s is not valid. Valid datasets are: %s" %
                           (ds_name, ds_dict.keys()))
    prefix = ds_dict[ds_name]

    # Collect audio files matching the prefix for every known extension.
    audio_dir = os.path.join(in_path, msaf.Dataset.audio_dir)
    audio_files = []
    for ext in msaf.Dataset.audio_exts:
        audio_files.extend(glob.glob(os.path.join(audio_dir,
                                                  ("%s_*" + ext) % prefix)))
    # Some datasets use a different prefix; fall back to any file name.
    if not audio_files:
        for ext in msaf.Dataset.audio_exts:
            audio_files.extend(glob.glob(os.path.join(audio_dir, "*" + ext)))

    # Make sure the working directories exist.
    for sub_dir in (msaf.Dataset.features_dir, msaf.Dataset.estimations_dir,
                    msaf.Dataset.references_dir):
        utils.ensure_dir(os.path.join(in_path, sub_dir))

    # Wrap each audio file in its file structure.
    file_structs = [FileStruct(audio_file) for audio_file in audio_files]

    # The Beatles are a filtered subset of Isophonics.
    if ds_name == "Beatles":
        file_structs = filter_by_artist(file_structs, "The Beatles")
    # SALAMI Internet subset hack.
    if ds_name == "SALAMI-i":
        file_structs = get_SALAMI_internet(file_structs)

    # Deterministic order: sort by audio file name.
    return sorted(file_structs,
                  key=lambda file_struct: file_struct.audio_file)
def read_hier_references(jams_file, annotation_id=0, exclude_levels=[]):
    """Reads hierarchical references from a jams file.

    Parameters
    ----------
    jams_file : str
        Path to the jams file.
    annotation_id : int > 0
        Identifier of the annotator to read from.
    exclude_levels: list
        List of levels to exclude. Empty list to include all levels.

    Returns
    -------
    hier_bounds : list
        List of the segment boundary times in seconds for each level.
    hier_labels : list
        List of the segment labels for each level.
    hier_levels : list
        List of strings for the level identifiers.
    """
    # NOTE(review): `exclude_levels=[]` is a mutable default argument,
    # shared across calls; it is harmless here only because the list is
    # never mutated.
    def get_levels():
        """Obtains the set of unique levels contained in the jams
        sorted by the number of segments they contain.

        Returns
        -------
        levels : np.array
            Level identifiers for the entire hierarchy.
        """
        # Gather the level identifier of every segment not excluded.
        levels = []
        jam = jams2.load(jams_file)
        annotation = jam.sections[annotation_id]
        for segment in annotation.data:
            if segment.label.context not in exclude_levels:
                levels.append(segment.label.context)
        c = Counter(levels)  # Count frequency
        # Sort the unique level names by ascending segment count:
        # argsort over the counts permutes the corresponding keys.
        return np.asarray(list(dict(c).keys()))[np.argsort(list(c.values()))]

    def get_segments_in_level(level):
        """Gets the segments of a specific level.

        Parameters
        ----------
        level : str
            Identifier of the level within the jams file.

        Returns
        -------
        times : np.array
            Boundary times in seconds for the given level.
        labels : np.array
            Labels for the given level.
        """
        intervals, labels = jams2.converters.load_jams_range(jams_file,
            "sections", annotator=annotation_id, context=level)
        times = utils.intervals_to_times(intervals)
        return np.array(times), np.array(labels)

    # Get the levels of the annotations in the jams file
    hier_levels = get_levels()

    # Get the boundaries and labels for each level
    hier_bounds = []
    hier_labels = []
    for level in hier_levels:
        bound_times, labels = get_segments_in_level(level)
        hier_bounds.append(bound_times)
        hier_labels.append(labels)
    return hier_bounds, hier_labels, hier_levels
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class BaseLayerTest(test.TestCase):
  """Tests for `tf.layers.base.Layer`: construction, variable creation,
  scope naming, deep copies, input-spec validation and the cross-layer
  topology attributes (inputs/outputs, nodes, shapes)."""

  def testLayerProperties(self):
    # A fresh layer exposes empty variable/update/loss collections and is
    # not `built` until it is first applied.
    layer = base_layers.Layer(name='my_layer')
    self.assertListEqual(layer.variables, [])
    self.assertListEqual(layer.trainable_variables, [])
    self.assertListEqual(layer.non_trainable_variables, [])
    self.assertListEqual(layer.updates, [])
    self.assertListEqual(layer.losses, [])
    self.assertEqual(layer.built, False)
    layer = base_layers.Layer(name='my_layer', trainable=False)
    self.assertEqual(layer.trainable, False)

  def testAddWeight(self):
    with self.test_session():
      layer = base_layers.Layer(name='my_layer')

      # Test basic variable creation.
      variable = layer.add_variable(
          'my_var', [2, 2], initializer=init_ops.zeros_initializer())
      self.assertEqual(variable.name, 'my_layer/my_var:0')
      self.assertListEqual(layer.variables, [variable])
      self.assertListEqual(layer.trainable_variables, [variable])
      self.assertListEqual(layer.non_trainable_variables, [])
      # Trainable variables are also registered in the graph collection.
      self.assertListEqual(
          layer.variables,
          ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

      # Test non-trainable variable creation.
      # layer.add_variable should work even outside `build` and `call`.
      variable_2 = layer.add_variable(
          'non_trainable_var', [2, 2],
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      self.assertListEqual(layer.variables, [variable, variable_2])
      self.assertListEqual(layer.trainable_variables, [variable])
      self.assertListEqual(layer.non_trainable_variables, [variable_2])
      self.assertEqual(
          len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)

      # Test with regularizer: one loss is recorded per regularized var.
      regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
      variable = layer.add_variable(
          'reg_var', [2, 2],
          initializer=init_ops.zeros_initializer(),
          regularizer=regularizer)
      self.assertEqual(len(layer.losses), 1)

  def testGetVariable(self):
    with self.test_session():

      class MyLayer(base_layers.Layer):

        def build(self, input_shape):
          self.my_var = self.add_variable(
              'my_var', [2, 2], initializer=init_ops.zeros_initializer())

        def call(self, inputs):
          return inputs * 2

      layer = MyLayer(name='my_layer')
      inputs = random_ops.random_uniform((5,), seed=1)
      # Applying twice must not create duplicate variables.
      layer.apply(inputs)
      layer.apply(inputs)
      self.assertListEqual([v.name for v in layer.variables],
                           ['my_layer/my_var:0'])

      # Creating a layer with no scope leads to lazy construction of
      # the scope at apply() time. It uses scope "<current scope>/base_name"
      lazy_layer = MyLayer(_reuse=True)
      with variable_scope.variable_scope('new_scope'):
        # This should attempt to reuse 'my_var' in 'new_scope'
        with self.assertRaisesRegexp(
            ValueError, r'new_scope/my_layer/my_var does not exist'):
          lazy_layer.apply(inputs)
        with variable_scope.variable_scope('my_layer'):
          variable_scope.get_variable('my_var', [2, 2])

        # Smoke test: it runs.
        lazy_layer.apply(inputs)
        # The variables were created outside of the Layer, and
        # reuse=True, so the Layer does not own them and they are not
        # stored in its collection.
        self.assertListEqual(lazy_layer.variables, [])
        self.assertEqual(lazy_layer._scope.name, 'new_scope/my_layer')

      # Creating a layer with no scope leads to lazy construction of
      # the scope at apply() time. If 'scope' argument is passed to
      # apply(), it uses that scope when accessing variables.
      lazy_layer = MyLayer(_reuse=True)
      with variable_scope.variable_scope('new_scope') as new_scope:
        # This should attempt to reuse 'my_var' in 'new_scope'
        with self.assertRaisesRegexp(
            ValueError, r'new_scope/my_var does not exist'):
          lazy_layer.apply(inputs, scope=new_scope)
        variable_scope.get_variable('my_var', [2, 2])

        # Smoke test: it runs.
        lazy_layer.apply(inputs, scope=new_scope)
        # The variables were created outside of the Layer, and
        # reuse=True, so the Layer does not own them and they are not
        # stored in its collection.
        self.assertListEqual(lazy_layer.variables, [])
        self.assertEqual(lazy_layer._scope.name, 'new_scope')

      # Applying a layer to a tensor from a different graph must fail.
      with ops.Graph().as_default():
        inputs_ng = random_ops.random_uniform((5,), seed=1)
        with self.assertRaisesRegexp(ValueError,
                                     r'graph are not the same'):
          layer.apply(inputs_ng)

  def testCall(self):

    class MyLayer(base_layers.Layer):

      def call(self, inputs):
        return math_ops.square(inputs)

    layer = MyLayer(name='my_layer')
    inputs = random_ops.random_uniform((5,), seed=1)
    outputs = layer.apply(inputs)
    self.assertEqual(layer.built, True)
    # The op created in call() lives under the layer's name scope.
    self.assertEqual(outputs.op.name, 'my_layer/Square')

  def testFirstCallCanCreateVariablesButSecondCanNotWhenBuildEmpty(self):

    class MyLayer(base_layers.Layer):

      def build(self, _):
        # Do not mark the layer as built.
        pass

      def call(self, inputs):
        self.my_var = self.add_variable('my_var', [2, 2])
        if self.built:
          # Skip creating on the first call; try to create after it's
          # built. This is expected to fail.
          self.add_variable('this_will_break_on_second_call', [2, 2])
        return inputs + math_ops.square(self.my_var)

    layer = MyLayer(name='my_layer')
    inputs = random_ops.random_uniform((2,), seed=1)
    outputs = layer.apply(inputs)
    self.assertEqual(layer.built, True)
    self.assertEqual(outputs.op.name, 'my_layer/add')
    self.assertListEqual(
        [v.name for v in layer.variables], ['my_layer/my_var:0'])
    with self.assertRaisesRegexp(ValueError,
                                 'my_layer/this_will_break_on_second_call'):
      layer.apply(inputs)
    # The list of variables hasn't changed.
    self.assertListEqual(
        [v.name for v in layer.variables], ['my_layer/my_var:0'])

  def testDeepCopy(self):

    class MyLayer(base_layers.Layer):

      def call(self, inputs):
        return math_ops.square(inputs)

    layer = MyLayer(name='my_layer')
    layer._private_tensor = random_ops.random_uniform(())
    inputs = random_ops.random_uniform((5,), seed=1)
    outputs = layer.apply(inputs)
    self.assertEqual(layer.built, True)
    self.assertEqual(outputs.op.name, 'my_layer/Square')

    # A deep copy preserves name, scope, graph and custom attributes.
    layer_copy = copy.deepcopy(layer)
    self.assertEqual(layer_copy.name, layer.name)
    self.assertEqual(layer_copy._scope.name, layer._scope.name)
    self.assertEqual(layer_copy._graph, layer._graph)
    self.assertEqual(layer_copy._private_tensor, layer._private_tensor)

  def testScopeNaming(self):

    class PrivateLayer(base_layers.Layer):

      def call(self, inputs):
        return inputs

    inputs = random_ops.random_uniform((5,))
    # Unnamed layers are auto-named and uniquified with _1, _2, ...
    default_layer = PrivateLayer()
    _ = default_layer.apply(inputs)
    self.assertEqual(default_layer._scope.name, 'private_layer')
    default_layer1 = PrivateLayer()
    default_layer1.apply(inputs)
    self.assertEqual(default_layer1._scope.name, 'private_layer_1')
    my_layer = PrivateLayer(name='my_layer')
    my_layer.apply(inputs)
    self.assertEqual(my_layer._scope.name, 'my_layer')
    my_layer1 = PrivateLayer(name='my_layer')
    my_layer1.apply(inputs)
    self.assertEqual(my_layer1._scope.name, 'my_layer_1')
    my_layer2 = PrivateLayer(name='my_layer')
    my_layer2.apply(inputs)
    self.assertEqual(my_layer2._scope.name, 'my_layer_2')
    # Name scope shouldn't affect names.
    with ops.name_scope('some_name_scope'):
      default_layer2 = PrivateLayer()
      default_layer2.apply(inputs)
      self.assertEqual(default_layer2._scope.name, 'private_layer_2')
      my_layer3 = PrivateLayer(name='my_layer')
      my_layer3.apply(inputs)
      self.assertEqual(my_layer3._scope.name, 'my_layer_3')
      other_layer = PrivateLayer(name='other_layer')
      other_layer.apply(inputs)
      self.assertEqual(other_layer._scope.name, 'other_layer')
    # Variable scope gets added to scope names.
    with variable_scope.variable_scope('var_scope'):
      default_layer_scoped = PrivateLayer()
      default_layer_scoped.apply(inputs)
      self.assertEqual(default_layer_scoped._scope.name,
                       'var_scope/private_layer')
      my_layer_scoped = PrivateLayer(name='my_layer')
      my_layer_scoped.apply(inputs)
      self.assertEqual(my_layer_scoped._scope.name, 'var_scope/my_layer')
      my_layer_scoped1 = PrivateLayer(name='my_layer')
      my_layer_scoped1.apply(inputs)
      self.assertEqual(my_layer_scoped1._scope.name, 'var_scope/my_layer_1')

  def testInputSpecNdimCheck(self):
    # input_spec with an exact ndim rejects wrong-rank inputs.

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(ndim=2)

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError,
                                 r'requires a defined rank'):
      layer.apply(array_ops.placeholder('int32'))
    with self.assertRaisesRegexp(ValueError,
                                 r'expected ndim=2'):
      layer.apply(array_ops.placeholder('int32', shape=(None,)))
    # Works
    layer.apply(array_ops.placeholder('int32', shape=(None, None)))

  def testInputSpecMinNdimCheck(self):
    # min_ndim accepts any rank >= the minimum.

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(min_ndim=2)

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError,
                                 r'requires a defined rank'):
      layer.apply(array_ops.placeholder('int32'))
    with self.assertRaisesRegexp(ValueError,
                                 r'expected min_ndim=2'):
      layer.apply(array_ops.placeholder('int32', shape=(None,)))
    # Works
    layer.apply(array_ops.placeholder('int32', shape=(None, None)))
    layer.apply(array_ops.placeholder('int32', shape=(None, None, None)))

  def testInputSpecMaxNdimCheck(self):
    # max_ndim accepts any rank <= the maximum.

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(max_ndim=2)

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError,
                                 r'requires a defined rank'):
      layer.apply(array_ops.placeholder('int32'))
    with self.assertRaisesRegexp(ValueError,
                                 r'expected max_ndim=2'):
      layer.apply(array_ops.placeholder('int32', shape=(None, None, None)))
    # Works
    layer.apply(array_ops.placeholder('int32', shape=(None, None)))
    layer.apply(array_ops.placeholder('int32', shape=(None,)))

  def testInputSpecDtypeCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(dtype='float32')

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError,
                                 r'expected dtype=float32'):
      layer.apply(array_ops.placeholder('int32'))
    # Works
    layer.apply(array_ops.placeholder('float32', shape=(None, None)))

  def testInputSpecAxesCheck(self):
    # axes={-1: 2} pins the size of the last dimension to 2 (None is ok).

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(axes={-1: 2})

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError,
                                 r'expected axis'):
      layer.apply(array_ops.placeholder('int32', shape=(None, 3)))
    # Works
    layer.apply(array_ops.placeholder('int32', shape=(None, None, 2)))
    layer.apply(array_ops.placeholder('int32', shape=(None, 2)))

  def testInputSpecShapeCheck(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = base_layers.InputSpec(shape=(None, 3))

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    with self.assertRaisesRegexp(ValueError,
                                 r'expected shape'):
      layer.apply(array_ops.placeholder('int32', shape=(None, 2)))
    # Works
    layer.apply(array_ops.placeholder('int32', shape=(None, 3)))
    layer.apply(array_ops.placeholder('int32', shape=(2, 3)))

  def testNoInputSpec(self):

    class CustomerLayer(base_layers.Layer):

      def __init__(self):
        super(CustomerLayer, self).__init__()
        self.input_spec = None

      def call(self, inputs):
        return inputs

    layer = CustomerLayer()
    # Works: without an input_spec, any rank/shape is accepted.
    layer.apply(array_ops.placeholder('int32'))
    layer.apply(array_ops.placeholder('int32', shape=(2, 3)))

  def test_get_updates_for(self):
    a = base_layers.Input(shape=(2,))
    dense_layer = core_layers.Dense(1)
    dense_layer.add_update(0, inputs=a)
    dense_layer.add_update(1, inputs=None)
    # Updates conditioned on `a` vs. unconditional updates.
    self.assertListEqual(dense_layer.get_updates_for(a), [0])
    self.assertListEqual(dense_layer.get_updates_for(None), [1])

  def test_get_losses_for(self):
    a = base_layers.Input(shape=(2,))
    dense_layer = core_layers.Dense(1)
    dense_layer.add_loss(0, inputs=a)
    dense_layer.add_loss(1, inputs=None)
    # Losses conditioned on `a` vs. unconditional losses.
    self.assertListEqual(dense_layer.get_losses_for(a), [0])
    self.assertListEqual(dense_layer.get_losses_for(None), [1])

  def testTopologicalAttributes(self):
    # test layer attributes / methods related to cross-layer connectivity.
    a = base_layers.Input(shape=(32,), name='input_a')
    b = base_layers.Input(shape=(32,), name='input_b')

    # test input, output, input_shape, output_shape
    test_layer = core_layers.Dense(16, name='test_layer')
    a_test = test_layer(a)
    self.assertEqual(test_layer.input, a)
    self.assertEqual(test_layer.output, a_test)
    self.assertEqual(test_layer.input_shape, (None, 32))
    self.assertEqual(test_layer.output_shape, (None, 16))

    # test `get_*_at` methods
    dense = core_layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    self.assertEqual(dense.get_input_at(0), a)
    self.assertEqual(dense.get_input_at(1), b)
    self.assertEqual(dense.get_output_at(0), a_2)
    self.assertEqual(dense.get_output_at(1), b_2)
    self.assertEqual(dense.get_input_shape_at(0), (None, 32))
    self.assertEqual(dense.get_input_shape_at(1), (None, 32))
    self.assertEqual(dense.get_output_shape_at(0), (None, 16))
    self.assertEqual(dense.get_output_shape_at(1), (None, 16))

    # Test invalid value for attribute retrieval.
    with self.assertRaises(ValueError):
      dense.get_input_at(2)
    with self.assertRaises(AttributeError):
      new_dense = core_layers.Dense(16)
      _ = new_dense.input
    with self.assertRaises(AttributeError):
      new_dense = core_layers.Dense(16)
      _ = new_dense.output
    with self.assertRaises(AttributeError):
      new_dense = core_layers.Dense(16)
      _ = new_dense.output_shape
    with self.assertRaises(AttributeError):
      new_dense = core_layers.Dense(16)
      _ = new_dense.input_shape
    # NOTE(review): in the two blocks below, `new_dense` is never called,
    # so presumably the final attribute access is what raises — the
    # intermediate dense(a)/dense(b) calls look like setup noise; confirm
    # against upstream intent.
    with self.assertRaises(AttributeError):
      new_dense = core_layers.Dense(16)
      a = base_layers.Input(shape=(3, 32))
      a = base_layers.Input(shape=(5, 32))
      a_2 = dense(a)
      b_2 = dense(b)
      _ = new_dense.input_shape
    with self.assertRaises(AttributeError):
      new_dense = core_layers.Dense(16)
      a = base_layers.Input(shape=(3, 32))
      a = base_layers.Input(shape=(5, 32))
      a_2 = dense(a)
      b_2 = dense(b)
      _ = new_dense.output_shape

  def testTopologicalAttributesMultiOutputLayer(self):

    class PowersLayer(base_layers.Layer):

      def call(self, inputs):
        return [inputs ** 2, inputs ** 3]

    x = base_layers.Input(shape=(32,))
    test_layer = PowersLayer()
    p1, p2 = test_layer(x)  # pylint: disable=not-callable
    self.assertEqual(test_layer.input, x)
    # Multi-output layers report lists of outputs/shapes.
    self.assertEqual(test_layer.output, [p1, p2])
    self.assertEqual(test_layer.input_shape, (None, 32))
    self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])

  def testTopologicalAttributesMultiInputLayer(self):

    class AddLayer(base_layers.Layer):

      def call(self, inputs):
        assert len(inputs) == 2
        return inputs[0] + inputs[1]

    a = base_layers.Input(shape=(32,))
    b = base_layers.Input(shape=(32,))
    test_layer = AddLayer()
    y = test_layer([a, b])  # pylint: disable=not-callable
    # Multi-input layers report lists of inputs/shapes.
    self.assertEqual(test_layer.input, [a, b])
    self.assertEqual(test_layer.output, y)
    self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])
    self.assertEqual(test_layer.output_shape, (None, 32))
class NetworkTest(test.TestCase):
def testBasicNetwork(self):
# minimum viable network
x = base_layers.Input(shape=(32,))
dense = core_layers.Dense(2)
y = dense(x)
network = base_layers.Network(x, y, name='dense_network')
# test basic attributes
self.assertEqual(network.name, 'dense_network')
self.assertEqual(len(network.layers), 2) # InputLayer + Dense
self.assertEqual(network.layers[1], dense)
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, dense.trainable_weights)
self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights)
# test callability on Input
x_2 = base_layers.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test network `trainable` attribute
network.trainable = False
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, [])
self.assertEqual(network.non_trainable_weights,
dense.trainable_weights + dense.non_trainable_weights)
def test_node_construction(self):
# test graph topology construction basics
a = base_layers.Input(shape=(32,), name='input_a')
b = base_layers.Input(shape=(32,), name='input_b')
self.assertListEqual(a.get_shape().as_list(), [None, 32])
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, _, _ = b._keras_history
self.assertEqual(len(a_layer.inbound_nodes), 1)
self.assertEqual(a_tensor_index, 0)
node = a_layer.inbound_nodes[a_node_index]
self.assertEqual(node.outbound_layer, a_layer)
self.assertListEqual(node.inbound_layers, [])
self.assertListEqual(node.input_tensors, [a])
self.assertListEqual(node.input_shapes, [(None, 32)])
self.assertListEqual(node.output_tensors, [a])
self.assertListEqual(node.output_shapes, [(None, 32)])
dense = core_layers.Dense(16, name='dense_1')
dense(a)
dense(b)
self.assertEqual(len(dense.inbound_nodes), 2)
self.assertEqual(len(dense.outbound_nodes), 0)
self.assertListEqual(dense.inbound_nodes[0].inbound_layers, [a_layer])
self.assertEqual(dense.inbound_nodes[0].outbound_layer, dense)
self.assertListEqual(dense.inbound_nodes[1].inbound_layers, [b_layer])
self.assertEqual(dense.inbound_nodes[1].outbound_layer, dense)
self.assertListEqual(dense.inbound_nodes[0].input_tensors, [a])
self.assertListEqual(dense.inbound_nodes[1].input_tensors, [b])
# Test config
config_0 = dense.inbound_nodes[0].get_config()
self.assertEqual(config_0['outbound_layer'], dense.name)
def testMultiInputNetwork(self):
a = base_layers.Input(shape=(32,), name='input_a')
b = base_layers.Input(shape=(32,), name='input_b')
class AddLayer(base_layers.Layer):
def call(self, inputs):
assert len(inputs) == 2
return inputs[0] + inputs[1]
c = AddLayer()([a, b]) # pylint: disable=not-callable
network = base_layers.Network([a, b], c)
self.assertEqual(len(network.layers), 3) # 2 * InputLayer + AddLayer
# Test callability.
a2 = base_layers.Input(shape=(32,))
b2 = base_layers.Input(shape=(32,))
c2 = network([a2, b2])
self.assertEqual(c2.get_shape().as_list(), [None, 32])
def testMultiOutputNetwork(self):
x = base_layers.Input(shape=(32,))
y1 = core_layers.Dense(2)(x)
y2 = core_layers.Dense(3)(x)
network = base_layers.Network(x, [y1, y2])
self.assertEqual(len(network.layers), 3) # InputLayer + 2 * Dense
# Test callability.
x2 = base_layers.Input(shape=(32,))
outputs = network(x2)
self.assertEqual(type(outputs), list)
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].get_shape().as_list(), [None, 2])
self.assertEqual(outputs[1].get_shape().as_list(), [None, 3])
def testMultiInputMultiOutputNetworkSharedLayer(self):
a = base_layers.Input(shape=(32,), name='input_a')
b = base_layers.Input(shape=(32,), name='input_b')
dense = core_layers.Dense(2)
y1 = dense(a)
y2 = dense(b)
network = base_layers.Network([a, b], [y1, y2])
self.assertEqual(len(network.layers), 3) # 2 * InputLayer + Dense
# Test callability.
a2 = base_layers.Input(shape=(32,))
b2 = base_layers.Input(shape=(32,))
outputs = network([a2, b2])
self.assertEqual(type(outputs), list)
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].get_shape().as_list(), [None, 2])
self.assertEqual(outputs[1].get_shape().as_list(), [None, 2])
def testCrossDataFlows(self):
# Test the ability to have multi-output layers with outputs that get routed
# to separate layers
class PowersLayer(base_layers.Layer):
def call(self, inputs):
return [inputs ** 2, inputs ** 3]
x = base_layers.Input(shape=(32,))
p1, p2 = PowersLayer()(x) # pylint: disable=not-callable
y1 = core_layers.Dense(2)(p1)
y2 = core_layers.Dense(3)(p2)
network = base_layers.Network(x, [y1, y2])
self.assertEqual(len(network.layers), 4) # InputLayer + 2 * Dense + PLayer
# Test callability.
x2 = base_layers.Input(shape=(32,))
outputs = network(x2)
self.assertEqual(type(outputs), list)
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].get_shape().as_list(), [None, 2])
self.assertEqual(outputs[1].get_shape().as_list(), [None, 3])
def testNetworkAttributes(self):
x = base_layers.Input(shape=(32,))
z = core_layers.Dense(2, kernel_regularizer=lambda x: 0.01 * (x ** 2))(x)
dense = core_layers.Dense(2, name='dense')
dense.add_update(1)
y = dense(z)
net = base_layers.Network(x, y)
# losses
self.assertEqual(len(net.losses), 1)
# updates
self.assertEqual(len(net.updates), 1)
# get_layer
self.assertEqual(net.get_layer('dense'), dense)
self.assertEqual(net.get_layer(index=2), dense)
with self.assertRaises(ValueError):
net.get_layer('dense_unknown')
with self.assertRaises(ValueError):
net.get_layer()
with self.assertRaises(ValueError):
net.get_layer(index=4)
# input, output
self.assertEqual(net.input, x)
self.assertEqual(net.output, y)
# input_shape, output_shape
self.assertEqual(net.input_shape, (None, 32))
self.assertEqual(net.output_shape, (None, 2))
# get_*_at
self.assertEqual(net.get_input_at(0), x)
self.assertEqual(net.get_output_at(0), y)
# _compute_output_shape
self.assertEqual(net._compute_output_shape((3, 32)).as_list(), [3, 2])
def testInvalidNetworks(self):
# redundant inputs
x = base_layers.Input(shape=(32,))
y = core_layers.Dense(2)(x)
with self.assertRaises(ValueError):
base_layers.Network([x, x], y)
# inputs that don't come from Input
x = array_ops.placeholder(dtype='float32', shape=(None, 32))
y = core_layers.Dense(2)(x)
with self.assertRaises(ValueError):
base_layers.Network(x, y)
# inputs that don't come from Input but have a layer history
x = base_layers.Input(shape=(32,))
x = core_layers.Dense(32)(x)
y = core_layers.Dense(2)(x)
with self.assertRaises(ValueError):
base_layers.Network(x, y)
# outputs that don't come from layers
x = base_layers.Input(shape=(32,))
y = core_layers.Dense(2)(x)
y = 2 * y
with self.assertRaises(ValueError):
base_layers.Network(x, y)
# disconnected graphs
x1 = base_layers.Input(shape=(32,))
x2 = base_layers.Input(shape=(32,))
y = core_layers.Dense(2)(x1)
with self.assertRaises(ValueError):
base_layers.Network(x2, y)
# redundant layer names
x = base_layers.Input(shape=(32,))
z = core_layers.Dense(2, name='dense')(x)
y = core_layers.Dense(2, name='dense')(z)
with self.assertRaises(ValueError):
base_layers.Network(x, y)
def testInputTensorWrapping(self):
x = array_ops.placeholder(dtype='float32', shape=(None, 32))
x = base_layers.Input(tensor=x)
y = core_layers.Dense(2)(x)
base_layers.Network(x, y)
def testExplicitBatchSize(self):
x = base_layers.Input(shape=(32,), batch_size=3)
y = core_layers.Dense(2)(x)
self.assertEqual(y.get_shape().as_list(), [3, 2])
def testNetworkRecursion(self):
# test the ability of networks to be used as layers inside networks.
a = base_layers.Input(shape=(32,))
b = core_layers.Dense(2)(a)
net = base_layers.Network(a, b)
c = base_layers.Input(shape=(32,))
d = net(c)
recursive_net = base_layers.Network(c, d)
self.assertEqual(len(recursive_net.layers), 2)
self.assertEqual(recursive_net.layers[1], net)
self.assertEqual(len(recursive_net.weights), 2)
# test callability
x = array_ops.placeholder(dtype='float32', shape=(None, 32))
y = recursive_net(x)
self.assertEqual(y.get_shape().as_list(), [None, 2])
def testSparseInput(self):
class SparseSoftmax(base_layers.Layer):
def call(self, inputs):
return sparse_ops.sparse_softmax(inputs)
x = base_layers.Input(shape=(32,), sparse=True)
y = SparseSoftmax()(x) # pylint: disable=not-callable
network = base_layers.Network(x, y)
self.assertEqual(len(network.layers), 2)
self.assertEqual(network.layers[0].sparse, True)
def testMaskingSingleInput(self):
class MaskedLayer(base_layers.Layer):
def call(self, inputs, mask=None):
if mask is not None:
return inputs * mask
return inputs
def compute_mask(self, inputs, mask=None):
return array_ops.ones_like(inputs)
x = base_layers.Input(shape=(32,))
y = MaskedLayer()(x) # pylint: disable=not-callable
network = base_layers.Network(x, y)
# test callability on Input
x_2 = base_layers.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
# Allow running this test module directly.
if __name__ == '__main__':
  test.main()
| |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.contour import ContourSet
from matplotlib.tri.triangulation import Triangulation
import matplotlib._tri as _tri
import numpy as np
class TriContourSet(ContourSet):
    """
    Create and store a set of contour lines or filled regions for
    a triangular grid.

    User-callable method: clabel

    Useful attributes:
      ax:
        the axes object in which the contours are drawn
      collections:
        a silent_list of LineCollections or PolyCollections
      levels:
        contour levels
      layers:
        same as levels for line contours; half-way between
        levels for filled contours. See _process_colors method.
    """
    def __init__(self, ax, *args, **kwargs):
        """
        Draw triangular grid contour lines or filled regions,
        depending on whether keyword arg 'filled' is False
        (default) or True.

        The first argument of the initializer must be an axes
        object. The remaining arguments and keyword arguments
        are described in TriContourSet.tricontour_doc.
        """
        ContourSet.__init__(self, ax, *args, **kwargs)
    def _process_args(self, *args, **kwargs):
        """
        Process args and kwargs.
        """
        if isinstance(args[0], TriContourSet):
            # Reuse the C++ contour generator from an existing TriContourSet
            # (and its levels, unless levels were supplied explicitly).
            C = args[0].cppContourGenerator
            if self.levels is None:
                self.levels = args[0].levels
        else:
            # Build a new C++ generator from the triangulation and z data.
            tri, z = self._contour_args(args, kwargs)
            C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)
            x0 = tri.x.min()
            x1 = tri.x.max()
            y0 = tri.y.min()
            y1 = tri.y.max()
            # Grow the axes data limits to cover the triangulation extent.
            self.ax.update_datalim([(x0, y0), (x1, y1)])
            self.ax.autoscale_view()
        self.cppContourGenerator = C
    def _get_allsegs_and_allkinds(self):
        """
        Create and return allsegs and allkinds by calling underlying C code.
        """
        allsegs = []
        if self.filled:
            # Filled contours: one (segments, kinds) pair per level band.
            lowers, uppers = self._get_lowers_and_uppers()
            allkinds = []
            for lower, upper in zip(lowers, uppers):
                segs, kinds = self.cppContourGenerator.create_filled_contour(
                    lower, upper)
                allsegs.append([segs])
                allkinds.append([kinds])
        else:
            # Line contours carry no path-kind codes.
            allkinds = None
            for level in self.levels:
                segs = self.cppContourGenerator.create_contour(level)
                allsegs.append(segs)
        return allsegs, allkinds
    def _contour_args(self, args, kwargs):
        """Validate args and return the (triangulation, z) pair."""
        # fn is only used to name the calling function in error messages.
        if self.filled:
            fn = 'contourf'
        else:
            fn = 'contour'
        tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args,
                                                                   **kwargs)
        z = np.asarray(args[0])
        # z must supply exactly one value per triangulation point.
        if z.shape != tri.x.shape:
            raise ValueError('z array must have same length as triangulation x'
                             ' and y arrays')
        self.zmax = z.max()
        self.zmin = z.min()
        if self.logscale and self.zmin <= 0:
            raise ValueError('Cannot %s log of negative values.' % fn)
        self._contour_level_args(z, args[1:])
        return (tri, z)
    tricontour_doc = """
        Draw contours on an unstructured triangular grid.
        :func:`~matplotlib.pyplot.tricontour` and
        :func:`~matplotlib.pyplot.tricontourf` draw contour lines and
        filled contours, respectively.  Except as noted, function
        signatures and return values are the same for both versions.
        The triangulation can be specified in one of two ways; either::
          tricontour(triangulation, ...)
        where triangulation is a :class:`matplotlib.tri.Triangulation`
        object, or
        ::
          tricontour(x, y, ...)
          tricontour(x, y, triangles, ...)
          tricontour(x, y, triangles=triangles, ...)
          tricontour(x, y, mask=mask, ...)
          tricontour(x, y, triangles, mask=mask, ...)
        in which case a Triangulation object will be created.  See
        :class:`~matplotlib.tri.Triangulation` for a explanation of
        these possibilities.
        The remaining arguments may be::
          tricontour(..., Z)
        where *Z* is the array of values to contour, one per point
        in the triangulation.  The level values are chosen
        automatically.
        ::
          tricontour(..., Z, N)
        contour *N* automatically-chosen levels.
        ::
          tricontour(..., Z, V)
        draw contour lines at the values specified in sequence *V*
        ::
          tricontourf(..., Z, V)
        fill the (len(*V*)-1) regions between the values in *V*
        ::
          tricontour(Z, **kwargs)
        Use keyword args to control colors, linewidth, origin, cmap ... see
        below for more details.
        ``C = tricontour(...)`` returns a
        :class:`~matplotlib.contour.TriContourSet` object.
        Optional keyword arguments:
          *colors*: [ *None* | string | (mpl_colors) ]
            If *None*, the colormap specified by cmap will be used.
            If a string, like 'r' or 'red', all levels will be plotted in this
            color.
            If a tuple of matplotlib color args (string, float, rgb, etc),
            different levels will be plotted in different colors in the order
            specified.
          *alpha*: float
            The alpha blending value
          *cmap*: [ *None* | Colormap ]
            A cm :class:`~matplotlib.colors.Colormap` instance or
            *None*. If *cmap* is *None* and *colors* is *None*, a
            default Colormap is used.
          *norm*: [ *None* | Normalize ]
            A :class:`matplotlib.colors.Normalize` instance for
            scaling data values to colors. If *norm* is *None* and
            *colors* is *None*, the default linear scaling is used.
          *levels* [level0, level1, ..., leveln]
            A list of floating point numbers indicating the level
            curves to draw; e.g., to draw just the zero contour pass
            ``levels=[0]``
          *origin*: [ *None* | 'upper' | 'lower' | 'image' ]
            If *None*, the first value of *Z* will correspond to the
            lower left corner, location (0,0). If 'image', the rc
            value for ``image.origin`` will be used.
            This keyword is not active if *X* and *Y* are specified in
            the call to contour.
          *extent*: [ *None* | (x0,x1,y0,y1) ]
            If *origin* is not *None*, then *extent* is interpreted as
            in :func:`matplotlib.pyplot.imshow`: it gives the outer
            pixel boundaries. In this case, the position of Z[0,0]
            is the center of the pixel, not a corner. If *origin* is
            *None*, then (*x0*, *y0*) is the position of Z[0,0], and
            (*x1*, *y1*) is the position of Z[-1,-1].
            This keyword is not active if *X* and *Y* are specified in
            the call to contour.
          *locator*: [ *None* | ticker.Locator subclass ]
            If *locator* is None, the default
            :class:`~matplotlib.ticker.MaxNLocator` is used. The
            locator is used to determine the contour levels if they
            are not given explicitly via the *V* argument.
          *extend*: [ 'neither' | 'both' | 'min' | 'max' ]
            Unless this is 'neither', contour levels are automatically
            added to one or both ends of the range so that all data
            are included. These added ranges are then mapped to the
            special colormap values which default to the ends of the
            colormap range, but can be set via
            :meth:`matplotlib.colors.Colormap.set_under` and
            :meth:`matplotlib.colors.Colormap.set_over` methods.
          *xunits*, *yunits*: [ *None* | registered units ]
            Override axis units by specifying an instance of a
            :class:`matplotlib.units.ConversionInterface`.
        tricontour-only keyword arguments:
          *linewidths*: [ *None* | number | tuple of numbers ]
            If *linewidths* is *None*, the default width in
            ``lines.linewidth`` in ``matplotlibrc`` is used.
            If a number, all levels will be plotted with this linewidth.
            If a tuple, different levels will be plotted with different
            linewidths in the order specified
          *linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
            If *linestyles* is *None*, the 'solid' is used.
            *linestyles* can also be an iterable of the above strings
            specifying a set of linestyles to be used. If this
            iterable is shorter than the number of contour levels
            it will be repeated as necessary.
            If contour is using a monochrome colormap and the contour
            level is less than 0, then the linestyle specified
            in ``contour.negative_linestyle`` in ``matplotlibrc``
            will be used.
        tricontourf-only keyword arguments:
          *antialiased*: [ *True* | *False* ]
            enable antialiasing
        Note: tricontourf fills intervals that are closed at the top; that
        is, for boundaries *z1* and *z2*, the filled region is::
            z1 < z <= z2
        There is one exception: if the lowest boundary coincides with
        the minimum value of the *z* array, then that minimum value
        will be included in the lowest interval.
        **Examples:**
        .. plot:: mpl_examples/pylab_examples/tricontour_demo.py
        """
def tricontour(ax, *args, **kwargs):
    # Clear the axes first unless "hold" mode is active.
    if not ax._hold:
        ax.cla()
    # Line contours: force the unfilled code path regardless of caller input.
    kwargs = dict(kwargs, filled=False)
    return TriContourSet(ax, *args, **kwargs)
tricontour.__doc__ = TriContourSet.tricontour_doc
def tricontourf(ax, *args, **kwargs):
    # Clear the axes first unless "hold" mode is active.
    if not ax._hold:
        ax.cla()
    # Filled contours: force the filled code path regardless of caller input.
    kwargs = dict(kwargs, filled=True)
    return TriContourSet(ax, *args, **kwargs)
tricontourf.__doc__ = TriContourSet.tricontour_doc
| |
try:
import capstone as _capstone
except ImportError:
_capstone = None
try:
import keystone as _keystone
except ImportError:
_keystone = None
try:
import pyvex as _pyvex
except ImportError:
_pyvex = None
from .arch import Arch, register_arch, Endness, Register
from .archerror import ArchError
from .tls import TLSArchInfo
class ArchS390X(Arch):
    """Architecture description for IBM z/Architecture (s390x, 64-bit).

    s390x is big-endian only; constructing this arch with any other
    endness raises :class:`ArchError`.
    """

    def __init__(self, endness=Endness.BE):
        super(ArchS390X, self).__init__(endness)
        if endness != Endness.BE:
            raise ArchError('Arch s390x must be big endian')
        # Map VEX register offsets to call-argument positions.  Only
        # available when pyvex is importable, since self.registers (the
        # offset table) is derived from it.
        self.argument_register_positions = {
            self.registers['r2'][0]: 0,
            self.registers['r3'][0]: 1,
            self.registers['r4'][0]: 2,
            self.registers['r5'][0]: 3,
            self.registers['r6'][0]: 4,
            # fp registers
            self.registers['f0'][0]: 0,
            self.registers['f2'][0]: 1,
            self.registers['f4'][0]: 2,
            self.registers['f6'][0]: 3,
        } if _pyvex is not None else None

    bits = 64
    vex_arch = 'VexArchS390X'  # enum VexArch
    name = 'S390X'
    qemu_name = 's390x'  # target/s390x
    triplet = 's390x-linux-gnu'
    linux_name = 's390'  # arch/s390
    max_inst_bytes = 6
    ret_offset = 584  # offsetof(VexGuestS390XState, guest_r2)
    syscall_num_offset = 576  # offsetof(VexGuestS390XState, guest_r1)
    call_pushes_ret = False
    stack_change = -8
    initial_sp = 0x40000000000
    sizeof = {'short': 16, 'int': 32, 'long': 64, 'long long': 64}
    # Disassembler/assembler backends are optional dependencies.
    if _capstone:
        cs_arch = _capstone.CS_ARCH_SYSZ
        cs_mode = _capstone.CS_MODE_BIG_ENDIAN
    if _keystone:
        ks_arch = _keystone.KS_ARCH_SYSTEMZ
        ks_mode = _keystone.KS_MODE_BIG_ENDIAN
    ret_instruction = b'\x07\xf4'  # br %r14
    nop_instruction = b'\x07\x07'  # nopr %r7
    instruction_alignment = 2
    register_list = [
        Register(name='ia', size=8, alias_names=('ip', 'pc')),
        # General-purpose registers r0-r15, each with a 32-bit low half.
        Register(name='r0', size=8,
                 general_purpose=True),
        Register(name='r1', size=8,
                 general_purpose=True, subregisters=[('r1_32', 4, 4)]),
        Register(name='r2', size=8,
                 general_purpose=True, argument=True,
                 subregisters=[('r2_32', 4, 4)]),
        Register(name='r3', size=8,
                 general_purpose=True, argument=True,
                 linux_entry_value='argc',
                 subregisters=[('r3_32', 4, 4)]),
        Register(name='r4', size=8,
                 general_purpose=True, argument=True,
                 linux_entry_value='argv',
                 subregisters=[('r4_32', 4, 4)]),
        Register(name='r5', size=8,
                 general_purpose=True, argument=True,
                 linux_entry_value='envp',
                 subregisters=[('r5_32', 4, 4)]),
        Register(name='r6', size=8,
                 general_purpose=True, argument=True, persistent=True,
                 subregisters=[('r6_32', 4, 4)]),
        Register(name='r7', size=8,
                 general_purpose=True, persistent=True,
                 subregisters=[('r7_32', 4, 4)]),
        Register(name='r8', size=8,
                 general_purpose=True, persistent=True,
                 subregisters=[('r8_32', 4, 4)]),
        Register(name='r9', size=8,
                 general_purpose=True, persistent=True,
                 subregisters=[('r9_32', 4, 4)]),
        Register(name='r10', size=8,
                 general_purpose=True, persistent=True,
                 subregisters=[('r10_32', 4, 4)]),
        Register(name='r11', size=8, alias_names=('bp',),
                 general_purpose=True, persistent=True,
                 subregisters=[('r11_32', 4, 4)]),
        Register(name='r12', size=8,
                 general_purpose=True, persistent=True,
                 subregisters=[('r12_32', 4, 4)]),
        Register(name='r13', size=8,
                 general_purpose=True, persistent=True,
                 subregisters=[('r13_32', 4, 4)]),
        # Strictly speaking, there is no fixed link register on s390x.
        # However, %r14 is almost always used for that, so mark it as such.
        # Situations when that's not the case (e.g. brasl %r0,X)
        # can still be handled explicitly.
        Register(name='r14', size=8,
                 general_purpose=True, alias_names=('lr',)),
        Register(name='r15', size=8, alias_names=('sp',),
                 general_purpose=True, persistent=True,
                 default_value=(initial_sp, True, 'global')),
        # Vector registers; v0-v15 overlap the floating-point registers
        # f0-f15 in their upper 8 bytes.
        Register(name='v0', size=16, subregisters=[('f0', 0, 8)],
                 floating_point=True),
        Register(name='v1', size=16, subregisters=[('f1', 0, 8)],
                 floating_point=True),
        Register(name='v2', size=16, subregisters=[('f2', 0, 8)],
                 floating_point=True),
        Register(name='v3', size=16, subregisters=[('f3', 0, 8)],
                 floating_point=True),
        Register(name='v4', size=16, subregisters=[('f4', 0, 8)],
                 floating_point=True),
        Register(name='v5', size=16, subregisters=[('f5', 0, 8)],
                 floating_point=True),
        Register(name='v6', size=16, subregisters=[('f6', 0, 8)],
                 floating_point=True),
        Register(name='v7', size=16, subregisters=[('f7', 0, 8)],
                 floating_point=True),
        Register(name='v8', size=16, subregisters=[('f8', 0, 8)],
                 floating_point=True),
        Register(name='v9', size=16, subregisters=[('f9', 0, 8)],
                 floating_point=True),
        Register(name='v10', size=16, subregisters=[('f10', 0, 8)],
                 floating_point=True),
        Register(name='v11', size=16, subregisters=[('f11', 0, 8)],
                 floating_point=True),
        Register(name='v12', size=16, subregisters=[('f12', 0, 8)],
                 floating_point=True),
        Register(name='v13', size=16, subregisters=[('f13', 0, 8)],
                 floating_point=True),
        Register(name='v14', size=16, subregisters=[('f14', 0, 8)],
                 floating_point=True),
        Register(name='v15', size=16, subregisters=[('f15', 0, 8)],
                 floating_point=True),
        Register(name='v16', size=16, vector=True),
        Register(name='v17', size=16, vector=True),
        Register(name='v18', size=16, vector=True),
        Register(name='v19', size=16, vector=True),
        Register(name='v20', size=16, vector=True),
        Register(name='v21', size=16, vector=True),
        Register(name='v22', size=16, vector=True),
        Register(name='v23', size=16, vector=True),
        Register(name='v24', size=16, vector=True),
        Register(name='v25', size=16, vector=True),
        Register(name='v26', size=16, vector=True),
        Register(name='v27', size=16, vector=True),
        Register(name='v28', size=16, vector=True),
        Register(name='v29', size=16, vector=True),
        Register(name='v30', size=16, vector=True),
        Register(name='v31', size=16, vector=True),
        # a0-a15: 32-bit access registers.
        Register(name='a0', size=4),
        Register(name='a1', size=4),
        Register(name='a2', size=4),
        Register(name='a3', size=4),
        Register(name='a4', size=4),
        Register(name='a5', size=4),
        Register(name='a6', size=4),
        Register(name='a7', size=4),
        Register(name='a8', size=4),
        Register(name='a9', size=4),
        Register(name='a10', size=4),
        Register(name='a11', size=4),
        Register(name='a12', size=4),
        Register(name='a13', size=4),
        Register(name='a14', size=4),
        Register(name='a15', size=4),
        # VEX guest-state pseudo-registers.
        Register(name='nraddr', size=8),
        Register(name='cmstart', size=8),
        Register(name='cmlen', size=8),
        Register(name='ip_at_syscall', size=8, artificial=True),
        Register(name='emnote', size=4, artificial=True),
    ]
    function_prologs = {
        br'\xeb.[\xf0-\xff]..\x24',  # stmg %r1,%r3,d2(%r15)
    }
    function_epilogs = {
        br'\x07\xf4',  # br %r14
    }
    got_section_name = '.got'
    ld_linux_name = 'ld64.so.1'
    elf_tls = TLSArchInfo(
        variant=2,  # 3.4.7 @ https://www.uclibc.org/docs/tls.pdf
        tcbhead_size=64,  # sizeof(tcbhead_t)
        head_offsets=[0],  # offsetof(tcbhead_t, tcb)
        dtv_offsets=[8],  # offsetof(tcbhead_t, dtv)
        pthread_offsets=[16],  # offsetof(tcbhead_t, self)
        tp_offset=0,
        dtv_entry_offset=0)
register_arch(['s390'], 64, Endness.BE, ArchS390X)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import datetime
import logging
import os
import random
import sys
import threading
import time
import unittest
import pyrax
import pyrax.exceptions as exc
import pyrax.utils as utils
class SmokeTester(object):
    def __init__(self, context, region, logname=None, nolog=False,
            clean=False):
        """Set up per-region logging and acquire one client per service.

        context/region identify the pyrax identity context and the region
        to test.  When `nolog` is True, log output is discarded; otherwise
        it goes to a per-region file.  When `clean` is True, run_tests()
        only removes leftover SMOKE* resources instead of running tests.
        """
        self.context = context
        self.region = region
        self.clean = clean
        # Names of failed checks, reported after the run.
        self.failures = []
        # Resources to delete when the run finishes (insertion order matters;
        # see cf_upload_file(), which prepends objects so they are deleted
        # before their container).
        self.cleanup_items = []
        self.smoke_server = None
        self.smoke_volume = None
        self.smoke_snapshot = None
        logname = "%s-%s" % (logname or "smoketest", self.region)
        self.log = logging.getLogger(logname)
        if nolog:
            handler = logging.NullHandler()
        else:
            handler = logging.FileHandler(filename=logname, mode="w",
                    encoding="utf-8")
        formatter = logging.Formatter("%(asctime)s - %(message)s")
        handler.setFormatter(formatter)
        self.log.addHandler(handler)
        self.log.setLevel(logging.DEBUG)
        # One client per cloud service; a falsy client means the service is
        # unavailable in this region (surfaced by check_services()).
        self.cs = self.context.get_client("cloudservers", self.region)
        self.cf = self.context.get_client("cloudfiles", self.region)
        self.cbs = self.context.get_client("cloud_blockstorage", self.region)
        self.cdb = self.context.get_client("cloud_databases", self.region)
        self.clb = self.context.get_client("cloud_loadbalancers", self.region)
        self.dns = self.context.get_client("cloud_dns", self.region)
        self.cnw = self.context.get_client("cloud_networks", self.region)
        self.cmn = self.context.get_client("cloud_monitoring", self.region)
        self.au = self.context.get_client("autoscale", self.region)
        self.pq = self.context.get_client("queues", self.region)
        self.services = ({"service": self.cs, "name": "Cloud Servers"},
                {"service": self.cf, "name": "Cloud Files"},
                {"service": self.cbs, "name": "Cloud Block Storage"},
                {"service": self.cdb, "name": "Cloud Databases"},
                {"service": self.clb, "name": "Cloud Load Balancers"},
                {"service": self.dns, "name": "Cloud DNS"},
                {"service": self.cnw, "name": "Cloud Networks"},
                {"service": self.cmn, "name": "Cloud Monitoring"},
                {"service": self.au, "name": "Auto Scale"},
                {"service": self.pq, "name": "Cloud Queues"},
                )
def logit(self, *args, **kwargs):
txtargs = ["%s" % arg for arg in args]
msg = " ".join(txtargs)
print("%s - %s" % (self.region, msg), **kwargs)
self.log.debug(msg)
def check_services(self):
for service in self.services:
self.logit("SERVICE:", service["name"], end=' ')
if service["service"]:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("Service=%s" % service["name"])
def run_clean(self):
def cleanup_smoke(svc, list_method=None, *list_params):
list_method = list_method or "list"
mthd = getattr(svc, list_method)
try:
svcname = svc.name
except AttributeError:
svcname = "%s" % svc
try:
ents = [ent for ent in mthd(*list_params)
if ent.name.startswith("SMOKE")]
except Exception as e:
self.logit("Error listing for service", svcname)
self.logit(" Exception:", e)
return
if ents:
try:
ent.delete()
self.logit("Deleting", svcname, "resource", ent.id)
except Exception as e:
self.logit("Error deleting", svcname, "resource", ent.id)
self.logit(" Exception:", e)
else:
self.logit("No smoketest resources found in region",
self.region, "for service", svcname)
cleanup_smoke(self.cnw)
cleanup_smoke(self.cs)
cleanup_smoke(self.cdb)
cleanup_smoke(self.cf, "list_container_objects", "SMOKETEST_CONTAINER")
cleanup_smoke(self.cf)
cleanup_smoke(self.clb)
cleanup_smoke(self.dns, "list_records", "SMOKETEST.example.edu")
cleanup_smoke(self.dns)
cleanup_smoke(self.cmn, "list_checks", "SMOKETEST_entity")
cleanup_smoke(self.cmn, "list_entities")
cleanup_smoke(self.cmn, "list_notifications")
cleanup_smoke(self.cmn, "list_notification_plans")
cleanup_smoke(self.cmn, "list_alarms", "SMOKETEST_entity")
cleanup_smoke(self.cbs)
return
    def run_tests(self):
        """Run the full smoketest suite (or only cleanup when self.clean)."""
        if self.clean:
            return self.run_clean()
        if self.cs:
            self.logit("Running 'compute' tests...")
            self.cs_list_flavors()
            self.cs_list_images()
            self.cs_create_server()
            self.cs_reboot_server()
            self.cs_list_servers()
        if self.cnw:
            self.logit("Running 'network' tests...")
            try:
                self.cnw_create_network()
                self.cnw_list_networks()
            except exc.NotFound:
                # Networking not supported
                self.logit(" - Networking not supported.")
            except exc.NetworkCountExceeded:
                self.logit(" - Too many networks already exist.")
        if self.cdb:
            self.logit("Running 'database' tests...")
            self.cdb_list_flavors()
            self.cdb_create_instance()
            self.cdb_create_db()
            self.cdb_create_user()
        if self.cf:
            self.logit("Running 'object_store' tests...")
            self.cf_create_container()
            self.cf_list_containers()
            self.cf_make_container_public()
            self.cf_make_container_private()
            self.cf_upload_file()
        if self.clb:
            self.logit("Running 'load_balancer' tests...")
            self.lb_list()
            self.lb_create()
        if self.dns:
            self.logit("Running 'DNS' tests...")
            self.dns_list()
            self.dns_create_domain()
            self.dns_create_record()
        if self.cmn:
            if not self.smoke_server:
                self.logit("Server not available; skipping Monitoring tests.")
                # NOTE(review): this 'return' also skips the Block Storage
                # tests below, not just the Monitoring ones -- confirm that
                # is intentional.
                return
            self.cmn_create_entity()
            self.cmn_list_check_types()
            self.cmn_list_monitoring_zones()
            self.cmn_create_check()
            self.cmn_create_notification()
            self.cmn_create_notification_plan()
            self.cmn_create_alarm()
        if self.cbs:
            self.cbs_list_volumes()
            self.cbs_list_types()
            self.cbs_list_snapshots()
            self.cbs_create_volume()
            self.cbs_attach_to_instance()
            self.cbs_detach_from_instance()
            self.cbs_create_snapshot()
            self.cbs_delete_snapshot()
# Specific tests start here ##
def cs_list_flavors(self):
self.logit("Listing Flavors:", end=' ')
self.cs_flavors = self.cs.list_flavors()
if self.cs_flavors:
self.logit()
for flavor in self.cs_flavors:
self.logit(" -", flavor)
else:
self.logit("FAIL!")
self.failures.append("FLAVORS")
self.logit()
def cs_list_images(self):
self.logit("Listing Images:", end=' ')
self.cs_images = self.cs.list_base_images()
if self.cs_images:
for image in self.cs_images:
self.logit(" -", image)
else:
self.logit("FAIL!")
self.failures.append("IMAGES")
def cnw_create_network(self):
self.logit("Creating network...")
new_network_name = "SMOKETEST_NW"
new_network_cidr = "192.168.0.0/24"
self.logit("CREATE NETWORK:", end=' ')
self.logit("CNW", self.cnw)
self.smoke_network = self.cnw.create(new_network_name,
cidr=new_network_cidr)
self.cleanup_items.append(self.smoke_network)
if self.smoke_network:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("CREATE NETWORK")
def cnw_list_networks(self):
self.logit("Listing networks...")
try:
networks = self.cnw.list()
except exc.NotFound:
# Many non-rax system do no support networking.
self.logit("Networking not available")
return
for network in networks:
self.logit(" - %s: %s (%s)" % (network.id, network.name,
network.cidr))
if not networks:
self.failures.append("LIST NETWORKS")
def log_wait(self, obj, att="status", desired=None, verbose_atts=None):
start = time.time()
self.logit("Beginning wait for", obj.name, obj)
if not desired:
desired = ["ACTIVE", "ERROR"]
ret = utils.wait_until(obj, "status", desired=desired, interval=10,
verbose=True, verbose_atts="progress")
end = time.time()
duration = str(datetime.timedelta(seconds=(end - start)))
self.logit("Completed wait for", obj.name, obj)
self.logit(" It took %s to complete" % duration)
return ret
def cs_create_server(self):
self.logit("Creating server...")
img = [img for img in self.cs_images
if "12.04" in img.name][0]
flavor = self.cs_flavors[0]
self.smoke_server = self.cs.servers.create("SMOKETEST_SERVER",
img.id, flavor.id)
self.cleanup_items.append(self.smoke_server)
self.smoke_server = self.log_wait(self.smoke_server)
if self.smoke_server.status == "ERROR":
self.logit("Server creation failed!")
self.failures.append("SERVER CREATION")
else:
self.logit("Success!")
def cs_reboot_server(self):
self.logit("Rebooting server...")
self.smoke_server.reboot()
self.smoke_server = self.log_wait(self.smoke_server)
if self.smoke_server.status == "ERROR":
self.logit("Server reboot failed!")
self.failures.append("SERVER REBOOT")
else:
self.logit("Success!")
def cs_list_servers(self):
self.logit("Listing servers...")
servers = self.cs.servers.list()
if not servers:
self.logit("Server listing failed!")
self.failures.append("SERVER LISTING")
else:
for server in servers:
self.logit(" -", server.id, server.name)
def cdb_list_flavors(self):
self.logit("Listing Database Flavors:", end=' ')
try:
self.cdb_flavors = self.cdb.list_flavors()
except Exception as e:
self.logit("FAIL! List DB Flavors:", e)
self.cdb_flavors = None
if self.cdb_flavors:
for flavor in self.cdb_flavors:
self.logit(" -", flavor)
else:
self.logit("FAIL!")
self.failures.append("DB FLAVORS")
def cdb_create_instance(self):
if not self.cdb_flavors:
# Skip this test
self.logit("Skipping database instance creation...")
self.smoke_instance = None
return
self.logit("Creating database instance...")
self.smoke_instance = self.cdb.create("SMOKETEST_DB_INSTANCE",
flavor=self.cdb_flavors[0], volume=1)
self.cleanup_items.append(self.smoke_instance)
self.smoke_instance = self.log_wait(self.smoke_instance)
if self.smoke_instance.status == "ACTIVE":
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("DB INSTANCE CREATION")
def cdb_create_db(self):
if not self.smoke_instance:
# Skip this test
self.logit("Skipping database creation...")
return
self.logit("Creating database...")
self.smoke_db = self.smoke_instance.create_database("SMOKETEST_DB")
self.cleanup_items.append(self.smoke_db)
dbs = self.smoke_instance.list_databases()
if self.smoke_db in dbs:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("DB DATABASE CREATION")
def cdb_create_user(self):
if not self.smoke_instance:
# Skip this test
self.logit("Skipping database user creation...")
return
self.logit("Creating database user...")
self.smoke_user = self.smoke_instance.create_user("SMOKETEST_USER",
"SMOKETEST_PW", database_names=[self.smoke_db])
self.cleanup_items.append(self.smoke_user)
users = self.smoke_instance.list_users()
if self.smoke_user in users:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("DB USER CREATION")
def cf_create_container(self):
self.logit("Creating a Cloud Files Container...")
self.smoke_cont = self.cf.create_container("SMOKETEST_CONTAINER")
self.cleanup_items.append(self.smoke_cont)
if self.smoke_cont:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("CONTAINER CREATION")
def cf_list_containers(self):
self.logit("Listing the Cloud Files Containers...")
conts = self.cf.get_all_containers()
if conts:
for cont in conts:
try:
nm = cont.name
num = cont.object_count
size = cont.total_bytes
self.logit("%s - %s files, %s bytes" % (nm, num, size))
except Exception as e:
self.logit("FAIL! Container description", e)
else:
self.logit("FAIL!")
self.failures.append("CONTAINER LISTING")
def cf_make_container_public(self):
self.logit("Publishing the Cloud Files Container to CDN...")
self.smoke_cont.make_public()
uri = self.smoke_cont.cdn_uri
if uri:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("PUBLISHING CDN")
def cf_make_container_private(self):
self.logit("Removing the Cloud Files Container from CDN...")
try:
self.smoke_cont.make_private()
self.logit("Success!")
except Exception as e:
self.logit("FAIL!", e)
self.failures.append("UNPUBLISHING CDN")
def cf_upload_file(self):
self.logit("Uploading a Cloud Files object...")
cont = self.smoke_cont
text = utils.random_ascii(1024)
obj = cont.store_object("SMOKETEST_OBJECT", text)
# Make sure it is deleted before the container
self.cleanup_items.insert(0, obj)
all_objs = cont.get_object_names()
if obj.name in all_objs:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("UPLOAD FILE")
def lb_list(self):
self.logit("Listing Load Balancers...")
lbs = self.clb.list()
if not lbs:
self.logit(" - No load balancers to list!")
else:
for lb in lbs:
self.logit(" -", lb.name)
def lb_create(self):
self.logit("Creating a Load Balancer...")
node = self.clb.Node(address="10.177.1.1", port=80, condition="ENABLED")
vip = self.clb.VirtualIP(type="PUBLIC")
lb = self.clb.create("SMOKETEST_LB", port=80, protocol="HTTP",
nodes=[node], virtual_ips=[vip])
self.cleanup_items.append(lb)
lb = self.log_wait(lb)
if lb:
self.logit("Success!")
else:
self.logit("FAIL!")
self.failures.append("LOAD_BALANCERS")
def dns_list(self):
self.logit("Listing DNS Domains...")
doms = self.dns.list()
if not doms:
self.logit(" - No domains to list!")
else:
for dns in doms:
self.logit(" -", dns.name)
def dns_create_domain(self):
self.logit("Creating a DNS Domain...")
domain_name = "SMOKETEST.example.edu"
try:
dom = self.dns.create(name=domain_name,
emailAddress="sample@example.edu", ttl=900,
comment="SMOKETEST sample domain")
self.logit("Success!")
self.cleanup_items.append(dom)
except exc.DomainCreationFailed as e:
self.logit("FAIL!", e)
self.failures.append("DNS DOMAIN CREATION")
def dns_create_record(self):
self.logit("Creating a DNS Record...")
domain_name = "SMOKETEST.example.edu"
try:
dom = self.dns.find(name=domain_name)
except exc.NotFound:
self.logit("Smoketest domain not found; skipping record test.")
self.failures.append("DNS RECORD CREATION")
return
a_rec = {"type": "A",
"name": domain_name,
"data": "1.2.3.4",
"ttl": 6000}
try:
recs = dom.add_records(a_rec)
self.logit("Success!")
# No need to cleanup, since domain deletion also deletes the recs.
# self.cleanup_items.extend(recs)
except exc.DomainRecordAdditionFailed as e:
self.logit("FAIL!", e)
self.failures.append("DNS RECORD CREATION")
def cmn_list_check_types(self):
self.logit("Listing Monitoring Check Types...")
cts = self.cmn.list_check_types()
for ct in cts:
self.logit(" -", ct.id, ct.type)
def cmn_list_monitoring_zones(self):
self.logit("Listing Monitoring Zones...")
zones = self.cmn.list_monitoring_zones()
for zone in zones:
self.logit(" -", zone.id, zone.name)
def cmn_create_entity(self):
self.logit("Creating a Monitoring Entity...")
srv = self.smoke_server
ip = srv.networks["public"][0]
try:
self.smoke_entity = self.cmn.create_entity(name="SMOKETEST_entity",
ip_addresses={"main": ip})
self.cleanup_items.append(self.smoke_entity)
self.logit("Success!")
except Exception as e:
self.logit("FAIL!", e)
self.smoke_entity = None
self.failures.append("MONITORING CREATE ENTITY")
def cmn_create_check(self):
self.logit("Creating a Monitoring Check...")
ent = self.smoke_entity
alias = ent.ip_addresses.keys()[0]
try:
self.smoke_check = self.cmn.create_check(ent,
label="SMOKETEST_check", check_type="remote.ping",
details={"count": 5}, monitoring_zones_poll=["mzdfw"],
period=60, timeout=20, target_alias=alias)
self.logit("Success!")
self.cleanup_items.append(self.smoke_check)
except Exception as e:
self.logit("FAIL!", e)
self.smoke_check = None
self.failures.append("MONITORING CREATE CHECK")
def cmn_create_notification(self):
self.logit("Creating a Monitoring Notification...")
email = "smoketest@example.com"
try:
self.smoke_notification = self.cmn.create_notification("email",
label="SMOKETEST_NOTIFICATION", details={"address": email})
self.logit("Success!")
self.cleanup_items.append(self.smoke_notification)
except Exception as e:
self.logit("FAIL!", e)
self.smoke_notification = None
self.failures.append("MONITORING CREATE NOTIFICATION")
def cmn_create_notification_plan(self):
if not self.smoke_notification:
self.logit("No monitoring notification found; skipping "
"notification creation...")
return
self.logit("Creating a Monitoring Notification Plan...")
try:
self.smoke_notification_plan = self.cmn.create_notification_plan(
label="SMOKETEST_PLAN", ok_state=self.smoke_notification)
self.logit("Success!")
self.cleanup_items.append(self.smoke_notification_plan)
except Exception as e:
self.logit("FAIL!", e)
self.smoke_notification_plan = None
self.failures.append("MONITORING CREATE NOTIFICATION PLAN")
def cmn_create_alarm(self):
if not self.smoke_notification_plan:
self.logit("No monitoring plan found; skipping alarm creation...")
return
self.logit("Creating a Monitoring Alarm...")
try:
self.smoke_alarm = self.cmn.create_alarm(self.smoke_entity,
self.smoke_check, self.smoke_notification_plan,
label="SMOKETEST_ALARM")
self.logit("Success!")
self.cleanup_items.append(self.smoke_alarm)
except Exception as e:
self.logit("FAIL!", e)
self.failures.append("MONITORING CREATE ALARM")
def cbs_list_volumes(self):
self.logit("Listing Block Storage Volumes...")
vols = self.cbs.list()
for vol in vols:
self.logit(" -", vol.name, "(%s)" % vol.volume_type, "Size:",
vol.size)
def cbs_list_types(self):
self.logit("Listing Block Storage Volume Types...")
typs = self.cbs.list_types()
for typ in typs:
self.logit(" -", typ.name)
def cbs_list_snapshots(self):
self.logit("Listing Block Storage Snapshots...")
snaps = self.cbs.list_snapshots()
for snap in snaps:
self.logit(" -", snap.name, "(%s)" % snap.status, "Size:",
snap.size)
def cbs_create_volume(self):
self.logit("Creating Volume...")
typ = random.choice(self.cbs.list_types())
self.smoke_volume = self.cbs.create("SMOKETEST_VOLUME", size=100,
volume_type="SATA", description="SMOKETEST_VOLUME_DESCRIPTION")
self.cleanup_items.append(self.smoke_volume)
self.smoke_volume = self.log_wait(self.smoke_volume,
desired=["available", "error"])
if self.smoke_volume.status == "ERROR":
self.logit("Volume creation failed!")
self.failures.append("VOLUME CREATION")
else:
self.logit("Success!")
def cbs_attach_to_instance(self):
if not self.smoke_server:
self.logit("Server not available; skipping volume attach tests.")
return
self.logit("Attaching Volume to instance...")
try:
self.smoke_volume.attach_to_instance(self.smoke_server, "/dev/xvdb")
except Exception as e:
self.logit("FAIL!", e)
return
self.smoke_volume = self.log_wait(self.smoke_volume,
desired=["in-use", "error"])
self.logit("Success!")
def cbs_detach_from_instance(self):
if not self.smoke_server:
self.logit("Server not available; skipping volume detach tests.")
return
self.logit("Detaching Volume from instance...")
try:
self.smoke_volume.detach()
except Exception as e:
self.logit("FAIL!", e)
return
self.smoke_volume = self.log_wait(self.smoke_volume,
desired=["available", "error"])
self.logit("Success!")
def cbs_create_snapshot(self):
if not self.smoke_volume:
self.logit("Volume not available; skipping snapshot tests.")
return
self.logit("Creating Snapshot...")
try:
self.smoke_snapshot = self.cbs.create_snapshot(self.smoke_volume,
name="SMOKETEST_SNAPSHOT")
except Exception as e:
self.logit("FAIL!", e)
return
self.smoke_snapshot = self.log_wait(self.smoke_snapshot,
desired=["available", "error"])
self.logit("Success!")
def cbs_delete_snapshot(self):
if not self.smoke_snapshot:
self.logit("Snapshot not available; skipping snapshot deletion.")
return
self.logit("Deleting Snapshot...")
try:
self.cbs.delete_snapshot(self.smoke_snapshot)
except Exception as e:
self.logit("FAIL!", e)
return
# Need to wait until the snapshot is deleted
snap_id = self.smoke_snapshot.id
self.logit("Waiting for snapshot deletion...")
while True:
try:
snap = self.cbs.get_snapshot(snap_id)
except exc.NotFound:
break
time.sleep(5)
self.logit("Success!")
def cleanup(self):
self.logit("Cleaning up...")
for item in self.cleanup_items:
try:
item.delete()
self.logit(" - Deleting:", end=' ')
try:
self.logit(item.name)
except AttributeError:
self.logit(item)
except exc.NotFound:
# Some items are deleted along with others (e.g., DNS records
# when a domain is deleted), so don't complain.
pass
except Exception as e:
self.logit("Could not delete '%s': %s" % (item, e))
class TestThread(threading.Thread):
    """Runs the smoketest (or cleanup) for a single region in its own thread."""

    def __init__(self, context, region, logname, nolog, clean):
        self.context = context
        self.region = region
        self.clean = clean
        # May raise exc.NoSuchClient; the caller handles that.
        self.tester = SmokeTester(context, region, logname, nolog, clean)
        threading.Thread.__init__(self)

    def run(self):
        banner = "=" * 77
        print()
        print(banner)
        if self.clean:
            print("Starting cleanup for region: %s" % self.region)
        else:
            print("Starting test for region: %s" % self.region)
        print(banner)
        try:
            self.tester.run_tests()
        finally:
            # Always clean up, even if the test run blew up.
            self.tester.cleanup()
        print()
        print("=" * 88)
        if self.tester.failures:
            print("The following tests failed:")
            for failure in self.tester.failures:
                print(" -", failure)
        else:
            print(self.region, "- all tests passed!")
if __name__ == "__main__":
    # Command-line entry point: parse options, authenticate once, then run
    # the smoketest (or cleanup) concurrently in one TestThread per region.
    parser = argparse.ArgumentParser(description="Run the smoke tests!")
    parser.add_argument("--regions", "-r", action="append",
            help="""Regions to run tests against. Can be specified multiple
            times. If not specified, the default of pyrax.regions will be
            used.""")
    parser.add_argument("--env", "-e", help="""Configuration environment to
            use for the test. If not specified, the `default` environment is
            used.""")
    parser.add_argument("--logname", "-l", help="""Optional prefix name for the
            log file created for each region in the smoketest.
            Default = 'smoketest-REGION'. """)
    parser.add_argument("--no-log", "-n", action="store_true",
            help="""Turns off logging. No log files will be created if this
            parameter is set.""")
    parser.add_argument("--clean", "-c", action="store_true", help="""Don't
            run the tests; instead, go through the account and delete any
            resources that begin with 'SMOKE'.""")
    args = parser.parse_args()
    env = args.env
    regions = args.regions
    logname = args.logname or "smoketest"
    nolog = args.no_log
    clean = args.clean
    start = time.time()
    context = pyrax.create_context(env=env)
    print("Authenticating...", end=" ")
    try:
        context.keyring_auth()
        print("Success!")
    except Exception as e:
        # No point continuing without credentials.
        print("FAIL!", e)
        exit()
    if not regions:
        # Default to every region the authenticated context knows about.
        regions = context.regions
    test_threads = []
    for region in regions:
        try:
            test = TestThread(context, region, logname, nolog, clean)
        except exc.NoSuchClient:
            print("ERROR - no client for region '%s'" % region)
            continue
        test_threads.append(test)
        test.start()
    # Wait for all regions to finish before reporting total time.
    for test_thread in test_threads:
        test_thread.join()
    end = time.time()
    print()
    print("Running the smoketests took %6.1f seconds." % (end - start))
    print()
# ---- (file boundary artifact: asyncio task tests follow) ----
"""Tests for tasks.py."""
import gc
import os.path
import unittest
from test.script_helper import assert_python_ok
import asyncio
from asyncio import test_utils
@asyncio.coroutine
def coroutine_function():
    """Minimal no-op coroutine; presumably a shared fixture for tests below."""
    pass
class Dummy:
    """Inert callable with a stable repr, used as a done-callback in tests."""

    def __call__(self, *args):
        # Accept and ignore any arguments.
        return None

    def __repr__(self):
        return 'Dummy()'
class TaskTests(unittest.TestCase):
    def setUp(self):
        # Private deterministic loop; clear the global loop so any test that
        # forgets to pass ``loop=`` fails loudly instead of silently working.
        self.loop = test_utils.TestLoop()
        asyncio.set_event_loop(None)
    def tearDown(self):
        self.loop.close()
        # Collect now so warnings from unconsumed generators/futures are
        # attributed to the test that created them.
        gc.collect()
    def test_task_class(self):
        """Task runs its coroutine to completion and records loop and result."""
        @asyncio.coroutine
        def notmuch():
            return 'ok'
        t = asyncio.Task(notmuch(), loop=self.loop)
        self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'ok')
        self.assertIs(t._loop, self.loop)  # white-box: bound to given loop
        loop = asyncio.new_event_loop()
        t = asyncio.Task(notmuch(), loop=loop)
        self.assertIs(t._loop, loop)
        loop.close()
    def test_async_coroutine(self):
        """async() wraps a coroutine in a Task bound to the given loop."""
        @asyncio.coroutine
        def notmuch():
            return 'ok'
        t = asyncio.async(notmuch(), loop=self.loop)
        self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'ok')
        self.assertIs(t._loop, self.loop)
        loop = asyncio.new_event_loop()
        t = asyncio.async(notmuch(), loop=loop)
        self.assertIs(t._loop, loop)
        loop.close()
    def test_async_future(self):
        """async() passes a Future through unchanged; a foreign loop raises."""
        f_orig = asyncio.Future(loop=self.loop)
        f_orig.set_result('ko')
        f = asyncio.async(f_orig)
        self.loop.run_until_complete(f)
        self.assertTrue(f.done())
        self.assertEqual(f.result(), 'ko')
        self.assertIs(f, f_orig)
        loop = asyncio.new_event_loop()
        # A future already attached to another loop must be rejected.
        with self.assertRaises(ValueError):
            f = asyncio.async(f_orig, loop=loop)
        loop.close()
        f = asyncio.async(f_orig, loop=self.loop)
        self.assertIs(f, f_orig)
    def test_async_task(self):
        """async() passes an existing Task through; a foreign loop raises."""
        @asyncio.coroutine
        def notmuch():
            return 'ok'
        t_orig = asyncio.Task(notmuch(), loop=self.loop)
        t = asyncio.async(t_orig)
        self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'ok')
        self.assertIs(t, t_orig)
        loop = asyncio.new_event_loop()
        with self.assertRaises(ValueError):
            t = asyncio.async(t_orig, loop=loop)
        loop.close()
        t = asyncio.async(t_orig, loop=self.loop)
        self.assertIs(t, t_orig)
    def test_async_neither(self):
        """async() rejects objects that are neither coroutine nor future."""
        with self.assertRaises(TypeError):
            asyncio.async('ok')
    def test_task_repr(self):
        """repr() tracks the task through PENDING, CANCELLING, CANCELLED, done."""
        @asyncio.coroutine
        def notmuch():
            yield from []
            return 'abc'
        t = asyncio.Task(notmuch(), loop=self.loop)
        t.add_done_callback(Dummy())
        self.assertEqual(repr(t), 'Task(<notmuch>)<PENDING, [Dummy()]>')
        t.cancel()  # Does not take immediate effect!
        self.assertEqual(repr(t), 'Task(<notmuch>)<CANCELLING, [Dummy()]>')
        self.assertRaises(asyncio.CancelledError,
                          self.loop.run_until_complete, t)
        self.assertEqual(repr(t), 'Task(<notmuch>)<CANCELLED>')
        t = asyncio.Task(notmuch(), loop=self.loop)
        self.loop.run_until_complete(t)
        self.assertEqual(repr(t), "Task(<notmuch>)<result='abc'>")
    def test_task_repr_custom(self):
        """A Task subclass can pick up __repr__ from a cooperating base class."""
        @asyncio.coroutine
        def coro():
            pass
        class T(asyncio.Future):
            def __repr__(self):
                return 'T[]'
        class MyTask(asyncio.Task, T):
            def __repr__(self):
                return super().__repr__()
        gen = coro()
        t = MyTask(gen, loop=self.loop)
        self.assertEqual(repr(t), 'T[](<coro>)')
        # Close the never-run generator to avoid a ResourceWarning.
        gen.close()
    def test_task_basics(self):
        """Return values propagate through nested ``yield from`` coroutines."""
        @asyncio.coroutine
        def outer():
            a = yield from inner1()
            b = yield from inner2()
            return a+b
        @asyncio.coroutine
        def inner1():
            return 42
        @asyncio.coroutine
        def inner2():
            return 1000
        t = outer()
        self.assertEqual(self.loop.run_until_complete(t), 1042)
    def test_cancel(self):
        """Cancelling a task sleeping on the loop raises CancelledError."""
        # gen() scripts the fake clock: each yielded value advances loop time.
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        @asyncio.coroutine
        def task():
            yield from asyncio.sleep(10.0, loop=loop)
            return 12
        t = asyncio.Task(task(), loop=loop)
        loop.call_soon(t.cancel)
        with self.assertRaises(asyncio.CancelledError):
            loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        # Cancelling an already-cancelled task is a no-op.
        self.assertFalse(t.cancel())
    def test_cancel_yield(self):
        """Cancelling a started coroutine between bare yields cancels it."""
        @asyncio.coroutine
        def task():
            yield
            yield
            return 12
        t = asyncio.Task(task(), loop=self.loop)
        test_utils.run_briefly(self.loop)  # start coro
        t.cancel()
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertTrue(t.cancelled())
        self.assertFalse(t.cancel())
    def test_cancel_inner_future(self):
        """Cancelling the future a task awaits also cancels the task."""
        f = asyncio.Future(loop=self.loop)
        @asyncio.coroutine
        def task():
            yield from f
            return 12
        t = asyncio.Task(task(), loop=self.loop)
        test_utils.run_briefly(self.loop)  # start task
        f.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(t)
        self.assertTrue(f.cancelled())
        self.assertTrue(t.cancelled())
    def test_cancel_both_task_and_inner_future(self):
        """Cancelling both the task and its awaited future is safe."""
        f = asyncio.Future(loop=self.loop)
        @asyncio.coroutine
        def task():
            yield from f
            return 12
        t = asyncio.Task(task(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        f.cancel()
        t.cancel()
        with self.assertRaises(asyncio.CancelledError):
            self.loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertTrue(f.cancelled())
        self.assertTrue(t.cancelled())
    def test_cancel_task_catching(self):
        """A task may catch CancelledError and finish normally (not cancelled)."""
        fut1 = asyncio.Future(loop=self.loop)
        fut2 = asyncio.Future(loop=self.loop)
        @asyncio.coroutine
        def task():
            yield from fut1
            try:
                yield from fut2
            except asyncio.CancelledError:
                return 42
        t = asyncio.Task(task(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut1)  # White-box test.
        fut1.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut2)  # White-box test.
        t.cancel()
        # The cancellation lands on fut2, which the coroutine catches.
        self.assertTrue(fut2.cancelled())
        res = self.loop.run_until_complete(t)
        self.assertEqual(res, 42)
        self.assertFalse(t.cancelled())
    def test_cancel_task_ignoring(self):
        """A task that swallows the cancellation keeps running afterwards."""
        fut1 = asyncio.Future(loop=self.loop)
        fut2 = asyncio.Future(loop=self.loop)
        fut3 = asyncio.Future(loop=self.loop)
        @asyncio.coroutine
        def task():
            yield from fut1
            try:
                yield from fut2
            except asyncio.CancelledError:
                pass
            res = yield from fut3
            return res
        t = asyncio.Task(task(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut1)  # White-box test.
        fut1.set_result(None)
        test_utils.run_briefly(self.loop)
        self.assertIs(t._fut_waiter, fut2)  # White-box test.
        t.cancel()
        self.assertTrue(fut2.cancelled())
        test_utils.run_briefly(self.loop)
        # After ignoring the cancel, the task has moved on to await fut3.
        self.assertIs(t._fut_waiter, fut3)  # White-box test.
        fut3.set_result(42)
        res = self.loop.run_until_complete(t)
        self.assertEqual(res, 42)
        self.assertFalse(fut3.cancelled())
        self.assertFalse(t.cancelled())
    def test_cancel_current_task(self):
        """A task that cancels itself aborts its next suspension immediately."""
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        @asyncio.coroutine
        def task():
            t.cancel()
            self.assertTrue(t._must_cancel)  # White-box test.
            # The sleep should be cancelled immediately.
            yield from asyncio.sleep(100, loop=loop)
            return 12
        t = asyncio.Task(task(), loop=loop)
        self.assertRaises(
            asyncio.CancelledError, loop.run_until_complete, t)
        self.assertTrue(t.done())
        self.assertFalse(t._must_cancel)  # White-box test.
        self.assertFalse(t.cancel())
    def test_stop_while_run_in_complete(self):
        """loop.stop() inside run_until_complete raises and leaves the task
        unfinished."""
        # gen() scripts the fake clock: each yielded value advances loop time.
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
            self.assertAlmostEqual(0.2, when)
            when = yield 0.1
            self.assertAlmostEqual(0.3, when)
            yield 0.1
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        x = 0
        waiters = []
        @asyncio.coroutine
        def task():
            nonlocal x
            while x < 10:
                waiters.append(asyncio.sleep(0.1, loop=loop))
                yield from waiters[-1]
                x += 1
                if x == 2:
                    loop.stop()
        t = asyncio.Task(task(), loop=loop)
        self.assertRaises(
            RuntimeError, loop.run_until_complete, t)
        self.assertFalse(t.done())
        self.assertEqual(x, 2)
        self.assertAlmostEqual(0.3, loop.time())
        # close generators
        for w in waiters:
            w.close()
    def test_wait_for(self):
        """wait_for() cancels the wrapped task when the timeout fires."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.2, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            when = yield 0.1
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        foo_running = None
        @asyncio.coroutine
        def foo():
            nonlocal foo_running
            foo_running = True
            try:
                yield from asyncio.sleep(0.2, loop=loop)
            finally:
                foo_running = False
            return 'done'
        fut = asyncio.Task(foo(), loop=loop)
        with self.assertRaises(asyncio.TimeoutError):
            loop.run_until_complete(asyncio.wait_for(fut, 0.1, loop=loop))
        self.assertTrue(fut.done())
        # it should have been cancelled due to the timeout
        self.assertTrue(fut.cancelled())
        self.assertAlmostEqual(0.1, loop.time())
        # The finally clause ran, so the coroutine was truly unwound.
        self.assertEqual(foo_running, False)
    def test_wait_for_blocking(self):
        """wait_for() with timeout=None simply awaits the coroutine."""
        loop = test_utils.TestLoop()
        self.addCleanup(loop.close)
        @asyncio.coroutine
        def coro():
            return 'done'
        res = loop.run_until_complete(asyncio.wait_for(coro(),
                                                       timeout=None,
                                                       loop=loop))
        self.assertEqual(res, 'done')
    def test_wait_for_with_global_loop(self):
        """wait_for() without loop= falls back to the global event loop."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.2, when)
            when = yield 0
            self.assertAlmostEqual(0.01, when)
            yield 0.01
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        @asyncio.coroutine
        def foo():
            yield from asyncio.sleep(0.2, loop=loop)
            return 'done'
        asyncio.set_event_loop(loop)
        try:
            fut = asyncio.Task(foo(), loop=loop)
            # No loop= here: wait_for must pick up the global loop.
            with self.assertRaises(asyncio.TimeoutError):
                loop.run_until_complete(asyncio.wait_for(fut, 0.01))
        finally:
            asyncio.set_event_loop(None)
        self.assertAlmostEqual(0.01, loop.time())
        self.assertTrue(fut.done())
        self.assertTrue(fut.cancelled())
    def test_wait(self):
        """wait() returns (done, pending) once every awaited task finishes."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
        b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
        @asyncio.coroutine
        def foo():
            done, pending = yield from asyncio.wait([b, a], loop=loop)
            self.assertEqual(done, set([a, b]))
            self.assertEqual(pending, set())
            return 42
        res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertEqual(res, 42)
        self.assertAlmostEqual(0.15, loop.time())
        # Doing it again should take no time and exercise a different path.
        res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertAlmostEqual(0.15, loop.time())
        self.assertEqual(res, 42)
    def test_wait_with_global_loop(self):
        """wait() without loop= falls back to the global event loop."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.01, when)
            when = yield 0
            self.assertAlmostEqual(0.015, when)
            yield 0.015
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.Task(asyncio.sleep(0.01, loop=loop), loop=loop)
        b = asyncio.Task(asyncio.sleep(0.015, loop=loop), loop=loop)
        @asyncio.coroutine
        def foo():
            # No loop= here: wait must pick up the global loop.
            done, pending = yield from asyncio.wait([b, a])
            self.assertEqual(done, set([a, b]))
            self.assertEqual(pending, set())
            return 42
        asyncio.set_event_loop(loop)
        try:
            res = loop.run_until_complete(
                asyncio.Task(foo(), loop=loop))
        finally:
            asyncio.set_event_loop(None)
        self.assertEqual(res, 42)
    def test_wait_duplicate_coroutines(self):
        """Passing the same coroutine object twice yields a single result."""
        @asyncio.coroutine
        def coro(s):
            return s
        c = coro('test')
        task = asyncio.Task(
            asyncio.wait([c, c, coro('spam')], loop=self.loop),
            loop=self.loop)
        done, pending = self.loop.run_until_complete(task)
        self.assertFalse(pending)
        self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
    def test_wait_errors(self):
        """wait() rejects an empty set and an invalid return_when value."""
        self.assertRaises(
            ValueError, self.loop.run_until_complete,
            asyncio.wait(set(), loop=self.loop))
        self.assertRaises(
            ValueError, self.loop.run_until_complete,
            asyncio.wait([asyncio.sleep(10.0, loop=self.loop)],
                         return_when=-1, loop=self.loop))
    def test_wait_first_completed(self):
        """FIRST_COMPLETED returns as soon as the quicker task finishes."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            yield 0.1
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
        b = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
        task = asyncio.Task(
            asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
                         loop=loop),
            loop=loop)
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertFalse(a.done())
        self.assertTrue(b.done())
        self.assertIsNone(b.result())
        self.assertAlmostEqual(0.1, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b], loop=loop))
    def test_wait_really_done(self):
        # there is possibility that some tasks in the pending list
        # became done but their callbacks haven't all been called yet
        @asyncio.coroutine
        def coro1():
            yield
        @asyncio.coroutine
        def coro2():
            yield
            yield
        a = asyncio.Task(coro1(), loop=self.loop)
        b = asyncio.Task(coro2(), loop=self.loop)
        task = asyncio.Task(
            asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
                         loop=self.loop),
            loop=self.loop)
        done, pending = self.loop.run_until_complete(task)
        # Both tasks end up reported as done, not just the first one.
        self.assertEqual({a, b}, done)
        self.assertTrue(a.done())
        self.assertIsNone(a.result())
        self.assertTrue(b.done())
        self.assertIsNone(b.result())
    def test_wait_first_exception(self):
        """FIRST_EXCEPTION returns immediately when a task already failed."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            yield 0
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        # first_exception, task already has exception
        a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
        @asyncio.coroutine
        def exc():
            raise ZeroDivisionError('err')
        b = asyncio.Task(exc(), loop=loop)
        task = asyncio.Task(
            asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION,
                         loop=loop),
            loop=loop)
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertAlmostEqual(0, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b], loop=loop))
    def test_wait_first_exception_in_wait(self):
        """FIRST_EXCEPTION returns when a task fails while wait() is running."""
        def gen():
            when = yield
            self.assertAlmostEqual(10.0, when)
            when = yield 0
            self.assertAlmostEqual(0.01, when)
            yield 0.01
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        # first_exception, exception during waiting
        a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
        @asyncio.coroutine
        def exc():
            yield from asyncio.sleep(0.01, loop=loop)
            raise ZeroDivisionError('err')
        b = asyncio.Task(exc(), loop=loop)
        task = asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION,
                            loop=loop)
        done, pending = loop.run_until_complete(task)
        self.assertEqual({b}, done)
        self.assertEqual({a}, pending)
        self.assertAlmostEqual(0.01, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b], loop=loop))
    def test_wait_with_exception(self):
        """A failing task is still reported in the done set (exception kept)."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            yield 0.15
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
        @asyncio.coroutine
        def sleeper():
            yield from asyncio.sleep(0.15, loop=loop)
            raise ZeroDivisionError('really')
        b = asyncio.Task(sleeper(), loop=loop)
        @asyncio.coroutine
        def foo():
            done, pending = yield from asyncio.wait([b, a], loop=loop)
            self.assertEqual(len(done), 2)
            self.assertEqual(pending, set())
            errors = set(f for f in done if f.exception() is not None)
            self.assertEqual(len(errors), 1)
        loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertAlmostEqual(0.15, loop.time())
        # Second run exercises the everything-already-done path.
        loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertAlmostEqual(0.15, loop.time())
    def test_wait_with_timeout(self):
        """wait(timeout=...) returns with the slower task still pending."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            when = yield 0
            self.assertAlmostEqual(0.11, when)
            yield 0.11
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
        b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
        @asyncio.coroutine
        def foo():
            done, pending = yield from asyncio.wait([b, a], timeout=0.11,
                                                    loop=loop)
            self.assertEqual(done, set([a]))
            self.assertEqual(pending, set([b]))
        loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertAlmostEqual(0.11, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b], loop=loop))
    def test_wait_concurrent_complete(self):
        """Timeout firing at the same instant a task completes: task counts
        as done."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(0.15, when)
            when = yield 0
            self.assertAlmostEqual(0.1, when)
            yield 0.1
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
        b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
        done, pending = loop.run_until_complete(
            asyncio.wait([b, a], timeout=0.1, loop=loop))
        self.assertEqual(done, set([a]))
        self.assertEqual(pending, set([b]))
        self.assertAlmostEqual(0.1, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b], loop=loop))
    def test_as_completed(self):
        """as_completed() yields futures in completion order, not input order."""
        def gen():
            yield 0
            yield 0
            yield 0.01
            yield 0
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        completed = set()
        time_shifted = False
        @asyncio.coroutine
        def sleeper(dt, x):
            nonlocal time_shifted
            yield from asyncio.sleep(dt, loop=loop)
            completed.add(x)
            # Once the two fast sleepers are done, jump the clock so 'c'
            # completes too.
            if not time_shifted and 'a' in completed and 'b' in completed:
                time_shifted = True
                loop.advance_time(0.14)
            return x
        a = sleeper(0.01, 'a')
        b = sleeper(0.01, 'b')
        c = sleeper(0.15, 'c')
        @asyncio.coroutine
        def foo():
            values = []
            for f in asyncio.as_completed([b, c, a], loop=loop):
                values.append((yield from f))
            return values
        res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertAlmostEqual(0.15, loop.time())
        self.assertTrue('a' in res[:2])
        self.assertTrue('b' in res[:2])
        self.assertEqual(res[2], 'c')
        # Doing it again should take no time and exercise a different path.
        res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertAlmostEqual(0.15, loop.time())
    def test_as_completed_with_timeout(self):
        """as_completed(timeout=...) raises TimeoutError for unfinished work."""
        def gen():
            yield
            yield 0
            yield 0
            yield 0.1
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.sleep(0.1, 'a', loop=loop)
        b = asyncio.sleep(0.15, 'b', loop=loop)
        @asyncio.coroutine
        def foo():
            values = []
            for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop):
                if values:
                    # Nudge the clock past the timeout after the first value.
                    loop.advance_time(0.02)
                try:
                    v = yield from f
                    values.append((1, v))
                except asyncio.TimeoutError as exc:
                    values.append((2, exc))
            return values
        res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
        self.assertEqual(len(res), 2, res)
        self.assertEqual(res[0], (1, 'a'))
        self.assertEqual(res[1][0], 2)
        self.assertIsInstance(res[1][1], asyncio.TimeoutError)
        self.assertAlmostEqual(0.12, loop.time())
        # move forward to close generator
        loop.advance_time(10)
        loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_as_completed_with_unused_timeout(self):
def gen():
yield
yield 0
yield 0.01
loop = test_utils.TestLoop(gen)
self.addCleanup(loop.close)
a = asyncio.sleep(0.01, 'a', loop=loop)
@asyncio.coroutine
def foo():
for f in asyncio.as_completed([a], timeout=1, loop=loop):
v = yield from f
self.assertEqual(v, 'a')
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
    def test_as_completed_reverse_wait(self):
        """Futures from as_completed() may be awaited out of yield order."""
        def gen():
            yield 0
            yield 0.05
            yield 0
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.sleep(0.05, 'a', loop=loop)
        b = asyncio.sleep(0.10, 'b', loop=loop)
        fs = {a, b}
        futs = list(asyncio.as_completed(fs, loop=loop))
        self.assertEqual(len(futs), 2)
        # Await the second yielded future first; it resolves to whichever
        # input finishes first ('a').
        x = loop.run_until_complete(futs[1])
        self.assertEqual(x, 'a')
        self.assertAlmostEqual(0.05, loop.time())
        loop.advance_time(0.05)
        y = loop.run_until_complete(futs[0])
        self.assertEqual(y, 'b')
        self.assertAlmostEqual(0.10, loop.time())
    def test_as_completed_concurrent(self):
        """Two inputs completing at the same instant both deliver results."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.05, when)
            when = yield 0
            self.assertAlmostEqual(0.05, when)
            yield 0.05
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        a = asyncio.sleep(0.05, 'a', loop=loop)
        b = asyncio.sleep(0.05, 'b', loop=loop)
        fs = {a, b}
        futs = list(asyncio.as_completed(fs, loop=loop))
        self.assertEqual(len(futs), 2)
        waiter = asyncio.wait(futs, loop=loop)
        done, pending = loop.run_until_complete(waiter)
        self.assertEqual(set(f.result() for f in done), {'a', 'b'})
    def test_as_completed_duplicate_coroutines(self):
        """The same coroutine object listed twice produces a single result."""
        @asyncio.coroutine
        def coro(s):
            return s
        @asyncio.coroutine
        def runner():
            result = []
            c = coro('ham')
            for f in asyncio.as_completed([c, c, coro('spam')],
                                          loop=self.loop):
                result.append((yield from f))
            return result
        fut = asyncio.Task(runner(), loop=self.loop)
        self.loop.run_until_complete(fut)
        result = fut.result()
        self.assertEqual(set(result), {'ham', 'spam'})
        self.assertEqual(len(result), 2)
    def test_sleep(self):
        """sleep() advances loop time and forwards its optional result arg."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.05, when)
            when = yield 0.05
            self.assertAlmostEqual(0.1, when)
            yield 0.05
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        @asyncio.coroutine
        def sleeper(dt, arg):
            yield from asyncio.sleep(dt/2, loop=loop)
            # The second sleep carries a result value through.
            res = yield from asyncio.sleep(dt/2, arg, loop=loop)
            return res
        t = asyncio.Task(sleeper(0.1, 'yeah'), loop=loop)
        loop.run_until_complete(t)
        self.assertTrue(t.done())
        self.assertEqual(t.result(), 'yeah')
        self.assertAlmostEqual(0.1, loop.time())
def test_sleep_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = test_utils.TestLoop(gen)
self.addCleanup(loop.close)
t = asyncio.Task(asyncio.sleep(10.0, 'yeah', loop=loop),
loop=loop)
handle = None
orig_call_later = loop.call_later
def call_later(self, delay, callback, *args):
nonlocal handle
handle = orig_call_later(self, delay, callback, *args)
return handle
loop.call_later = call_later
test_utils.run_briefly(loop)
self.assertFalse(handle._cancelled)
t.cancel()
test_utils.run_briefly(loop)
self.assertTrue(handle._cancelled)
    def test_task_cancel_sleeping_task(self):
        """A timed cancel interrupts a very long sleep promptly."""
        def gen():
            when = yield
            self.assertAlmostEqual(0.1, when)
            when = yield 0
            self.assertAlmostEqual(5000, when)
            yield 0.1
        loop = test_utils.TestLoop(gen)
        self.addCleanup(loop.close)
        sleepfut = None
        @asyncio.coroutine
        def sleep(dt):
            nonlocal sleepfut
            sleepfut = asyncio.sleep(dt, loop=loop)
            yield from sleepfut
        @asyncio.coroutine
        def doit():
            sleeper = asyncio.Task(sleep(5000), loop=loop)
            # Schedule the cancel 0.1s in, long before the sleep would end.
            loop.call_later(0.1, sleeper.cancel)
            try:
                yield from sleeper
            except asyncio.CancelledError:
                return 'cancelled'
            else:
                return 'slept in'
        doer = doit()
        self.assertEqual(loop.run_until_complete(doer), 'cancelled')
        self.assertAlmostEqual(0.1, loop.time())
    def test_task_cancel_waiter_future(self):
        """Cancelling a task cancels the future it waits on and clears the
        waiter."""
        fut = asyncio.Future(loop=self.loop)
        @asyncio.coroutine
        def coro():
            yield from fut
        task = asyncio.Task(coro(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertIs(task._fut_waiter, fut)  # white-box
        task.cancel()
        test_utils.run_briefly(self.loop)
        self.assertRaises(
            asyncio.CancelledError, self.loop.run_until_complete, task)
        self.assertIsNone(task._fut_waiter)  # white-box
        self.assertTrue(fut.cancelled())
    def test_step_in_completed_task(self):
        """Stepping a task that already has a result is an internal error."""
        @asyncio.coroutine
        def notmuch():
            return 'ko'
        gen = notmuch()
        task = asyncio.Task(gen, loop=self.loop)
        task.set_result('ok')
        self.assertRaises(AssertionError, task._step)  # white-box
        gen.close()
    def test_step_result(self):
        """Yielding a non-None, non-future value from a coroutine is an error."""
        @asyncio.coroutine
        def notmuch():
            yield None
            yield 1
            return 'ko'
        self.assertRaises(
            RuntimeError, self.loop.run_until_complete, notmuch())
def test_step_result_future(self):
    """When a coroutine yields a future, the task parks itself on it by
    registering a done callback, and resumes with the future's result."""
    # If coroutine returns future, task waits on this future.

    class Fut(asyncio.Future):
        # Instrumented future recording add_done_callback() calls.
        def __init__(self, *args, **kwds):
            self.cb_added = False
            super().__init__(*args, **kwds)

        def add_done_callback(self, fn):
            self.cb_added = True
            super().add_done_callback(fn)

    fut = Fut(loop=self.loop)
    result = None

    @asyncio.coroutine
    def wait_for_future():
        nonlocal result
        result = yield from fut

    t = asyncio.Task(wait_for_future(), loop=self.loop)
    test_utils.run_briefly(self.loop)
    # The task registered itself on the yielded future.
    self.assertTrue(fut.cb_added)

    res = object()
    fut.set_result(res)
    test_utils.run_briefly(self.loop)
    # The coroutine resumed with exactly the future's result object.
    self.assertIs(res, result)
    self.assertTrue(t.done())
    self.assertIsNone(t.result())
def test_step_with_baseexception(self):
    """A BaseException raised inside the coroutine is re-raised by _step
    but is also recorded on the task as its exception."""
    @asyncio.coroutine
    def notmutch():
        raise BaseException()

    task = asyncio.Task(notmutch(), loop=self.loop)
    self.assertRaises(BaseException, task._step)

    self.assertTrue(task.done())
    self.assertIsInstance(task.exception(), BaseException)
def test_baseexception_during_cancel(self):
    """If the coroutine converts CancelledError into a BaseException, the
    task finishes with that exception and is NOT considered cancelled."""
    def gen():
        when = yield
        self.assertAlmostEqual(10.0, when)
        yield 0

    loop = test_utils.TestLoop(gen)
    self.addCleanup(loop.close)

    @asyncio.coroutine
    def sleeper():
        yield from asyncio.sleep(10, loop=loop)

    base_exc = BaseException()

    @asyncio.coroutine
    def notmutch():
        try:
            yield from sleeper()
        except asyncio.CancelledError:
            # Swallow the cancellation and raise something else instead.
            raise base_exc

    task = asyncio.Task(notmutch(), loop=loop)
    test_utils.run_briefly(loop)

    task.cancel()
    self.assertFalse(task.done())

    # The BaseException escapes the loop step...
    self.assertRaises(BaseException, test_utils.run_briefly, loop)

    # ...but the task records it as a normal failure, not a cancellation.
    self.assertTrue(task.done())
    self.assertFalse(task.cancelled())
    self.assertIs(task.exception(), base_exc)
def test_iscoroutinefunction(self):
    """Only @asyncio.coroutine-decorated callables are coroutine functions."""
    def plain():
        pass

    # A bare generator function is not enough either.
    def generator():
        yield

    @asyncio.coroutine
    def decorated():
        yield

    self.assertFalse(asyncio.iscoroutinefunction(plain))
    self.assertFalse(asyncio.iscoroutinefunction(generator))
    self.assertTrue(asyncio.iscoroutinefunction(decorated))
def test_yield_vs_yield_from(self):
    """Using bare ``yield fut`` (instead of ``yield from fut``) inside a
    coroutine is rejected with RuntimeError, leaving the future untouched."""
    fut = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def wait_for_future():
        yield fut  # wrong: should be ``yield from``

    task = wait_for_future()
    with self.assertRaises(RuntimeError):
        self.loop.run_until_complete(task)

    self.assertFalse(fut.done())
def test_yield_vs_yield_from_generator(self):
    """Bare-yielding a coroutine object (instead of ``yield from``) is
    likewise rejected with RuntimeError."""
    @asyncio.coroutine
    def coro():
        yield

    @asyncio.coroutine
    def wait_for_future():
        gen = coro()
        try:
            yield gen  # wrong: should be ``yield from``
        finally:
            gen.close()

    task = wait_for_future()
    self.assertRaises(
        RuntimeError,
        self.loop.run_until_complete, task)
def test_coroutine_non_gen_function(self):
    """@asyncio.coroutine on a plain (non-generator) function still yields
    a coroutine function whose call returns an awaitable result."""
    @asyncio.coroutine
    def func():
        return 'test'

    self.assertTrue(asyncio.iscoroutinefunction(func))

    coro = func()
    self.assertTrue(asyncio.iscoroutine(coro))

    res = self.loop.run_until_complete(coro)
    self.assertEqual(res, 'test')
def test_coroutine_non_gen_function_return_future(self):
    """A decorated plain function returning a future makes its task wait
    for that future and adopt its result."""
    fut = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def func():
        return fut

    @asyncio.coroutine
    def coro():
        fut.set_result('test')

    t1 = asyncio.Task(func(), loop=self.loop)
    t2 = asyncio.Task(coro(), loop=self.loop)
    res = self.loop.run_until_complete(t1)
    self.assertEqual(res, 'test')
    self.assertIsNone(t2.result())
def test_current_task(self):
    """Task.current_task() is the running task inside a coroutine and
    None outside of any task."""
    self.assertIsNone(asyncio.Task.current_task(loop=self.loop))

    @asyncio.coroutine
    def coro(loop):
        self.assertTrue(asyncio.Task.current_task(loop=loop) is task)

    task = asyncio.Task(coro(self.loop), loop=self.loop)
    self.loop.run_until_complete(task)
    self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
def test_current_task_with_interleaving_tasks(self):
    """current_task() tracks each task correctly across suspension points
    when two tasks hand control back and forth via futures."""
    self.assertIsNone(asyncio.Task.current_task(loop=self.loop))

    fut1 = asyncio.Future(loop=self.loop)
    fut2 = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def coro1(loop):
        self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
        yield from fut1
        # Still task1 after resuming from the suspension point.
        self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
        fut2.set_result(True)

    @asyncio.coroutine
    def coro2(loop):
        self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
        fut1.set_result(True)
        yield from fut2
        self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)

    task1 = asyncio.Task(coro1(self.loop), loop=self.loop)
    task2 = asyncio.Task(coro2(self.loop), loop=self.loop)

    self.loop.run_until_complete(asyncio.wait((task1, task2),
                                              loop=self.loop))
    self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
# Some thorough tests for cancellation propagation through
# coroutines, tasks and wait().
def test_yield_future_passes_cancel(self):
    """Cancellation flows through nested ``yield from``: cancelling
    outer() cancels inner(), which cancels the awaited future."""
    # Cancelling outer() cancels inner() cancels waiter.
    proof = 0
    waiter = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def inner():
        nonlocal proof
        try:
            yield from waiter
        except asyncio.CancelledError:
            proof += 1
            raise
        else:
            self.fail('got past sleep() in inner()')

    @asyncio.coroutine
    def outer():
        nonlocal proof
        try:
            yield from inner()
        except asyncio.CancelledError:
            proof += 100  # Expect this path.
        else:
            proof += 10

    f = asyncio.async(outer(), loop=self.loop)
    test_utils.run_briefly(self.loop)
    f.cancel()
    self.loop.run_until_complete(f)
    # 1 from inner()'s except, 100 from outer()'s except.
    self.assertEqual(proof, 101)
    self.assertTrue(waiter.cancelled())
def test_yield_wait_does_not_shield_cancel(self):
    """wait() does not shield its caller: cancelling outer() interrupts
    the wait() early while the awaited inner() task keeps running."""
    # Cancelling outer() makes wait() return early, leaves inner()
    # running.
    proof = 0
    waiter = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def inner():
        nonlocal proof
        yield from waiter
        proof += 1

    @asyncio.coroutine
    def outer():
        nonlocal proof
        d, p = yield from asyncio.wait([inner()], loop=self.loop)
        proof += 100

    f = asyncio.async(outer(), loop=self.loop)
    test_utils.run_briefly(self.loop)
    f.cancel()
    self.assertRaises(
        asyncio.CancelledError, self.loop.run_until_complete, f)
    waiter.set_result(None)
    test_utils.run_briefly(self.loop)
    # inner() completed (proof == 1); outer() never got past wait().
    self.assertEqual(proof, 1)
def test_shield_result(self):
    """A shielded future relays the inner future's result unchanged."""
    wrapped = asyncio.Future(loop=self.loop)
    shielded = asyncio.shield(wrapped)
    wrapped.set_result(42)
    self.assertEqual(self.loop.run_until_complete(shielded), 42)
def test_shield_exception(self):
    """An exception set on the inner future propagates to the shield."""
    inner = asyncio.Future(loop=self.loop)
    outer = asyncio.shield(inner)
    test_utils.run_briefly(self.loop)
    exc = RuntimeError('expected')
    inner.set_exception(exc)
    test_utils.run_briefly(self.loop)
    self.assertIs(outer.exception(), exc)
def test_shield_cancel(self):
    """Cancelling the inner future does cancel the shield — shield()
    only protects against cancellation coming from the outside."""
    inner = asyncio.Future(loop=self.loop)
    outer = asyncio.shield(inner)
    test_utils.run_briefly(self.loop)
    inner.cancel()
    test_utils.run_briefly(self.loop)
    self.assertTrue(outer.cancelled())
def test_shield_shortcut(self):
    """shield() applied to an already-finished future yields its result."""
    done = asyncio.Future(loop=self.loop)
    done.set_result(42)
    outcome = self.loop.run_until_complete(asyncio.shield(done))
    self.assertEqual(outcome, 42)
def test_shield_effect(self):
    """Cancelling the outer coroutine does not cancel the shielded
    inner() coroutine, which runs to completion."""
    # Cancelling outer() does not affect inner().
    proof = 0
    waiter = asyncio.Future(loop=self.loop)

    @asyncio.coroutine
    def inner():
        nonlocal proof
        yield from waiter
        proof += 1

    @asyncio.coroutine
    def outer():
        nonlocal proof
        yield from asyncio.shield(inner(), loop=self.loop)
        proof += 100

    f = asyncio.async(outer(), loop=self.loop)
    test_utils.run_briefly(self.loop)
    f.cancel()
    with self.assertRaises(asyncio.CancelledError):
        self.loop.run_until_complete(f)
    waiter.set_result(None)
    test_utils.run_briefly(self.loop)
    # inner() finished (proof == 1); outer() was cancelled before +100.
    self.assertEqual(proof, 1)
def test_shield_gather(self):
    """Cancelling a shield around gather() leaves the gather and its
    children running; they can still complete normally."""
    child1 = asyncio.Future(loop=self.loop)
    child2 = asyncio.Future(loop=self.loop)
    parent = asyncio.gather(child1, child2, loop=self.loop)
    outer = asyncio.shield(parent, loop=self.loop)
    test_utils.run_briefly(self.loop)
    outer.cancel()
    test_utils.run_briefly(self.loop)
    self.assertTrue(outer.cancelled())
    child1.set_result(1)
    child2.set_result(2)
    test_utils.run_briefly(self.loop)
    # The shielded gather still produced its results.
    self.assertEqual(parent.result(), [1, 2])
def test_gather_shield(self):
    """Cancelling gather() cancels the shields it wraps, but not the
    futures protected behind those shields."""
    child1 = asyncio.Future(loop=self.loop)
    child2 = asyncio.Future(loop=self.loop)
    inner1 = asyncio.shield(child1, loop=self.loop)
    inner2 = asyncio.shield(child2, loop=self.loop)
    parent = asyncio.gather(inner1, inner2, loop=self.loop)
    test_utils.run_briefly(self.loop)
    parent.cancel()
    # This should cancel inner1 and inner2 but not child1 and child2.
    test_utils.run_briefly(self.loop)
    self.assertIsInstance(parent.exception(), asyncio.CancelledError)
    self.assertTrue(inner1.cancelled())
    self.assertTrue(inner2.cancelled())
    # The protected children remain usable.
    child1.set_result(1)
    child2.set_result(2)
    test_utils.run_briefly(self.loop)
def test_as_completed_invalid_args(self):
    """as_completed() rejects a bare future or coroutine argument."""
    fut = asyncio.Future(loop=self.loop)

    # as_completed() expects a list of futures, not a future instance
    self.assertRaises(TypeError, self.loop.run_until_complete,
        asyncio.as_completed(fut, loop=self.loop))
    self.assertRaises(TypeError, self.loop.run_until_complete,
        asyncio.as_completed(coroutine_function(), loop=self.loop))
def test_wait_invalid_args(self):
    """wait() rejects a bare future/coroutine and an empty sequence."""
    fut = asyncio.Future(loop=self.loop)

    # wait() expects a list of futures, not a future instance
    self.assertRaises(TypeError, self.loop.run_until_complete,
        asyncio.wait(fut, loop=self.loop))
    self.assertRaises(TypeError, self.loop.run_until_complete,
        asyncio.wait(coroutine_function(), loop=self.loop))

    # wait() expects at least a future
    self.assertRaises(ValueError, self.loop.run_until_complete,
        asyncio.wait([], loop=self.loop))
class GatherTestsBase:
    """Shared machinery for gather() tests.

    Mixed into concrete TestCase subclasses that provide wrap_futures(),
    which either returns the futures unchanged or wraps each one in a
    coroutine.
    """

    def setUp(self):
        # Two independent loops to exercise loop-selection behavior.
        self.one_loop = test_utils.TestLoop()
        self.other_loop = test_utils.TestLoop()

    def tearDown(self):
        self.one_loop.close()
        self.other_loop.close()

    def _run_loop(self, loop):
        """Run the loop until its ready queue is fully drained."""
        while loop._ready:
            test_utils.run_briefly(loop)

    def _check_success(self, **kwargs):
        """gather() completes only after all children, in argument order."""
        a, b, c = [asyncio.Future(loop=self.one_loop) for i in range(3)]
        fut = asyncio.gather(*self.wrap_futures(a, b, c), **kwargs)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        b.set_result(1)
        a.set_result(2)
        self._run_loop(self.one_loop)
        # Two of three children done: gather not finished yet.
        self.assertEqual(cb.called, False)
        self.assertFalse(fut.done())
        c.set_result(3)
        self._run_loop(self.one_loop)
        cb.assert_called_once_with(fut)
        # Results follow argument order, not completion order.
        self.assertEqual(fut.result(), [2, 1, 3])

    def test_success(self):
        self._check_success()
        self._check_success(return_exceptions=False)

    def test_result_exception_success(self):
        self._check_success(return_exceptions=True)

    def test_one_exception(self):
        """Without return_exceptions, the first child error finishes the
        gather immediately with that exception."""
        a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)]
        fut = asyncio.gather(*self.wrap_futures(a, b, c, d, e))
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        exc = ZeroDivisionError()
        a.set_result(1)
        b.set_exception(exc)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        self.assertIs(fut.exception(), exc)
        # Does nothing
        c.set_result(3)
        d.cancel()
        e.set_exception(RuntimeError())
        e.exception()

    def test_return_exceptions(self):
        """With return_exceptions=True, errors appear in the result list
        in place of values; gather waits for every child."""
        a, b, c, d = [asyncio.Future(loop=self.one_loop) for i in range(4)]
        fut = asyncio.gather(*self.wrap_futures(a, b, c, d),
                             return_exceptions=True)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        exc = ZeroDivisionError()
        exc2 = RuntimeError()
        b.set_result(1)
        c.set_exception(exc)
        a.set_result(3)
        self._run_loop(self.one_loop)
        self.assertFalse(fut.done())
        d.set_exception(exc2)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        self.assertEqual(fut.result(), [3, 1, exc, exc2])

    def test_env_var_debug(self):
        """asyncio.tasks._DEBUG is driven by PYTHONASYNCIODEBUG unless the
        interpreter is started with -E (ignore-environment)."""
        path = os.path.dirname(asyncio.__file__)
        path = os.path.normpath(os.path.join(path, '..'))
        code = '\n'.join((
            'import sys',
            'sys.path.insert(0, %r)' % path,
            'import asyncio.tasks',
            'print(asyncio.tasks._DEBUG)'))

        # Test with -E to not fail if the unit test was run with
        # PYTHONASYNCIODEBUG set to a non-empty string
        sts, stdout, stderr = assert_python_ok('-E', '-c', code)
        self.assertEqual(stdout.rstrip(), b'False')

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='')
        self.assertEqual(stdout.rstrip(), b'False')

        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='1')
        self.assertEqual(stdout.rstrip(), b'True')

        sts, stdout, stderr = assert_python_ok('-E', '-c', code,
                                               PYTHONASYNCIODEBUG='1')
        self.assertEqual(stdout.rstrip(), b'False')
class FutureGatherTests(GatherTestsBase, unittest.TestCase):
    """gather() tests where the children are passed as plain futures."""

    def wrap_futures(self, *futures):
        # Futures are used as-is (cf. CoroutineGatherTests).
        return futures

    def _check_empty_sequence(self, seq_or_iter):
        """gather() of nothing is immediately done with an empty result."""
        asyncio.set_event_loop(self.one_loop)
        self.addCleanup(asyncio.set_event_loop, None)
        fut = asyncio.gather(*seq_or_iter)
        self.assertIsInstance(fut, asyncio.Future)
        self.assertIs(fut._loop, self.one_loop)
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        self.assertEqual(fut.result(), [])
        # An explicit loop= overrides the default event loop.
        fut = asyncio.gather(*seq_or_iter, loop=self.other_loop)
        self.assertIs(fut._loop, self.other_loop)

    def test_constructor_empty_sequence(self):
        self._check_empty_sequence([])
        self._check_empty_sequence(())
        self._check_empty_sequence(set())
        self._check_empty_sequence(iter(""))

    def test_constructor_heterogenous_futures(self):
        """Children bound to different loops are rejected."""
        fut1 = asyncio.Future(loop=self.one_loop)
        fut2 = asyncio.Future(loop=self.other_loop)
        with self.assertRaises(ValueError):
            asyncio.gather(fut1, fut2)
        with self.assertRaises(ValueError):
            asyncio.gather(fut1, loop=self.other_loop)

    def test_constructor_homogenous_futures(self):
        """gather() adopts the children's common loop."""
        children = [asyncio.Future(loop=self.other_loop) for i in range(3)]
        fut = asyncio.gather(*children)
        self.assertIs(fut._loop, self.other_loop)
        self._run_loop(self.other_loop)
        self.assertFalse(fut.done())
        fut = asyncio.gather(*children, loop=self.other_loop)
        self.assertIs(fut._loop, self.other_loop)
        self._run_loop(self.other_loop)
        self.assertFalse(fut.done())

    def test_one_cancellation(self):
        """A cancelled child finishes the gather with CancelledError as its
        exception (the gather itself is not 'cancelled')."""
        a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)]
        fut = asyncio.gather(a, b, c, d, e)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        a.set_result(1)
        b.cancel()
        self._run_loop(self.one_loop)
        self.assertTrue(fut.done())
        cb.assert_called_once_with(fut)
        self.assertFalse(fut.cancelled())
        self.assertIsInstance(fut.exception(), asyncio.CancelledError)
        # Does nothing
        c.set_result(3)
        d.cancel()
        e.set_exception(RuntimeError())
        e.exception()

    def test_result_exception_one_cancellation(self):
        """With return_exceptions=True a cancelled child shows up as a
        CancelledError entry in the result list."""
        a, b, c, d, e, f = [asyncio.Future(loop=self.one_loop)
                            for i in range(6)]
        fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True)
        cb = test_utils.MockCallback()
        fut.add_done_callback(cb)
        a.set_result(1)
        zde = ZeroDivisionError()
        b.set_exception(zde)
        c.cancel()
        self._run_loop(self.one_loop)
        self.assertFalse(fut.done())
        d.set_result(3)
        e.cancel()
        rte = RuntimeError()
        f.set_exception(rte)
        res = self.one_loop.run_until_complete(fut)
        self.assertIsInstance(res[2], asyncio.CancelledError)
        self.assertIsInstance(res[4], asyncio.CancelledError)
        # Normalize the two CancelledError instances for comparison.
        res[2] = res[4] = None
        self.assertEqual(res, [1, zde, None, 3, None, rte])
        cb.assert_called_once_with(fut)
class CoroutineGatherTests(GatherTestsBase, unittest.TestCase):
    """gather() tests where each child future is wrapped in a coroutine."""

    def setUp(self):
        super().setUp()
        # Coroutines carry no loop, so gather() must pick up the default.
        asyncio.set_event_loop(self.one_loop)

    def tearDown(self):
        asyncio.set_event_loop(None)
        super().tearDown()

    def wrap_futures(self, *futures):
        coros = []
        for fut in futures:
            # Bind fut as a default argument to avoid the late-binding
            # closure pitfall.
            @asyncio.coroutine
            def coro(fut=fut):
                return (yield from fut)
            coros.append(coro())
        return coros

    def test_constructor_loop_selection(self):
        """With only coroutines, gather() uses the default loop unless an
        explicit loop= is given."""
        @asyncio.coroutine
        def coro():
            return 'abc'
        gen1 = coro()
        gen2 = coro()
        fut = asyncio.gather(gen1, gen2)
        self.assertIs(fut._loop, self.one_loop)
        gen1.close()
        gen2.close()

        gen3 = coro()
        gen4 = coro()
        fut = asyncio.gather(gen3, gen4, loop=self.other_loop)
        self.assertIs(fut._loop, self.other_loop)
        gen3.close()
        gen4.close()

    def test_duplicate_coroutines(self):
        """The same coroutine object passed several times contributes its
        single result at each of its positions."""
        @asyncio.coroutine
        def coro(s):
            return s
        c = coro('abc')
        fut = asyncio.gather(c, c, coro('def'), c, loop=self.one_loop)
        self._run_loop(self.one_loop)
        self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc'])

    def test_cancellation_broadcast(self):
        """Cancelling the awaiting task cancels the gather and every child."""
        # Cancelling outer() cancels all children.
        proof = 0
        waiter = asyncio.Future(loop=self.one_loop)

        @asyncio.coroutine
        def inner():
            nonlocal proof
            yield from waiter
            proof += 1

        child1 = asyncio.async(inner(), loop=self.one_loop)
        child2 = asyncio.async(inner(), loop=self.one_loop)
        gatherer = None

        @asyncio.coroutine
        def outer():
            nonlocal proof, gatherer
            gatherer = asyncio.gather(child1, child2, loop=self.one_loop)
            yield from gatherer
            proof += 100

        f = asyncio.async(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        self.assertTrue(f.cancel())
        with self.assertRaises(asyncio.CancelledError):
            self.one_loop.run_until_complete(f)
        # The gather was already cancelled along with f.
        self.assertFalse(gatherer.cancel())
        self.assertTrue(waiter.cancelled())
        self.assertTrue(child1.cancelled())
        self.assertTrue(child2.cancelled())
        test_utils.run_briefly(self.one_loop)
        self.assertEqual(proof, 0)

    def test_exception_marking(self):
        """A second child failure after gather() is already done must be
        marked retrieved, not logged as an unhandled exception."""
        # Test for the first line marked "Mark exception retrieved."

        @asyncio.coroutine
        def inner(f):
            yield from f
            raise RuntimeError('should not be ignored')

        a = asyncio.Future(loop=self.one_loop)
        b = asyncio.Future(loop=self.one_loop)

        @asyncio.coroutine
        def outer():
            yield from asyncio.gather(inner(a), inner(b), loop=self.one_loop)

        f = asyncio.async(outer(), loop=self.one_loop)
        test_utils.run_briefly(self.one_loop)
        a.set_result(None)
        test_utils.run_briefly(self.one_loop)
        b.set_result(None)
        test_utils.run_briefly(self.one_loop)
        self.assertIsInstance(f.exception(), RuntimeError)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
import gevent
import re
from gevent import queue
from wal_e import exception
from wal_e import log_help
from wal_e import storage
# Module-level logger for the deletion machinery.
logger = log_help.WalELogger(__name__)

# Hint attached to warnings about keys that do not match any known WAL-E
# naming pattern.  (Grammar fix: "the result a bug" -> "the result of a bug".)
generic_weird_key_hint_message = ('This means an unexpected key was found in '
                                  'a WAL-E prefix.  It can be harmless, or '
                                  'the result of a bug or misconfiguration.')
class _Deleter(object):
    """Asynchronous batch deleter built on a gevent worker greenlet.

    Keys queued via delete() are drained by a background greenlet and
    removed from storage in pages of up to PAGINATION_MAX keys.

    NOTE(review): this module is Python 2 code (``xrange``,
    ``except KeyboardInterrupt, e`` below) — do not run under Python 3
    without porting.
    """

    def __init__(self):
        # Allow enqueuing of several API calls worth of work, which
        # right now allow 1000 key deletions per job.
        self.PAGINATION_MAX = 1000
        self._q = queue.JoinableQueue(self.PAGINATION_MAX * 10)
        self._worker = gevent.spawn(self._work)
        self._parent_greenlet = gevent.getcurrent()
        self.closing = False

    def close(self):
        """Stop accepting keys, wait for queued work, then kill the worker."""
        self.closing = True
        self._q.join()
        self._worker.kill(block=True)

    def delete(self, key):
        """Enqueue *key* for deletion; blocks when the queue is full."""
        if self.closing:
            raise exception.UserCritical(
                msg='attempt to delete while closing Deleter detected',
                hint='This should be reported as a bug.')

        self._q.put(key)

    def _work(self):
        """Worker loop: cut batches off the queue and delete them."""
        try:
            while True:
                # If _cut_batch has an error, it is responsible for
                # invoking task_done() the appropriate number of
                # times.
                page = self._cut_batch()

                # If nothing was enqueued, yield and wait around a bit
                # before looking for work again.
                if not page:
                    gevent.sleep(1)
                    continue

                # However, in event of success, the jobs are not
                # considered done until the _delete_batch returns
                # successfully.  In event an exception is raised, it
                # will be propagated to the Greenlet that created the
                # Deleter, but the tasks are marked done nonetheless.
                try:
                    self._delete_batch(page)
                finally:
                    for i in xrange(len(page)):
                        self._q.task_done()
        except KeyboardInterrupt, e:
            # Absorb-and-forward the exception instead of using
            # gevent's link_exception operator, because in gevent <
            # 1.0 there is no way to turn off the alarming stack
            # traces emitted when an exception propagates to the top
            # of a greenlet, linked or no.
            #
            # Normally, gevent.kill is ill-advised because it results
            # in asynchronous exceptions being raised in that
            # greenlet, but given that KeyboardInterrupt is nominally
            # asynchronously raised by receiving SIGINT to begin with,
            # there nothing obvious being lost from using kill() in
            # this case.
            gevent.kill(self._parent_greenlet, e)

    def _cut_batch(self):
        """Dequeue up to PAGINATION_MAX keys without blocking."""
        # Attempt to obtain as much work as possible, up to the
        # maximum able to be processed by S3 at one time,
        # PAGINATION_MAX.
        page = []

        try:
            for i in xrange(self.PAGINATION_MAX):
                page.append(self._q.get_nowait())
        except queue.Empty:
            pass
        except:
            # In event everything goes sideways while dequeuing,
            # carefully un-lock the queue.
            for i in xrange(len(page)):
                self._q.task_done()
            raise

        return page
class _BackupList(object):
    """Iterate and query base backups stored under a WAL-E layout.

    Subclasses supply ``_backup_list`` for the concrete storage backend.
    """

    def __init__(self, conn, layout, detail):
        self.conn = conn
        self.layout = layout
        # When True, __iter__ loads per-backup detail (extra web request).
        self.detail = detail

    def find_all(self, query):
        """A procedure to assist in finding or detailing specific backups

        Currently supports:

        * a backup name (base_number_number)

        * the pseudo-name LATEST, which finds the backup with the most
          recent modification date

        """

        match = re.match(storage.BASE_BACKUP_REGEXP, query)

        if match is not None:
            for backup in iter(self):
                if backup.name == query:
                    yield backup
        elif query == 'LATEST':
            all_backups = list(iter(self))

            if not all_backups:
                return

            assert len(all_backups) > 0

            all_backups.sort(key=lambda bi: bi.last_modified)
            yield all_backups[-1]
        else:
            raise exception.UserException(
                msg='invalid backup query submitted',
                detail='The submitted query operator was "{0}."'
                .format(query))

    def _backup_list(self):
        # Backend-specific key listing; must be provided by a subclass.
        raise NotImplementedError()

    def __iter__(self):
        """Yield backup-info objects for every sentinel file found."""
        # Try to identify the sentinel file.  This is sort of a drag, the
        # storage format should be changed to put them in their own leaf
        # directory.
        #
        # TODO: change storage format
        sentinel_depth = self.layout.basebackups().count('/')
        matcher = re.compile(storage.COMPLETE_BASE_BACKUP_REGEXP).match

        for key in self._backup_list(self.layout.basebackups()):
            key_name = self.layout.key_name(key)

            # Use key depth vs. base and regexp matching to find
            # sentinel files.
            key_depth = key_name.count('/')

            if key_depth == sentinel_depth:
                backup_sentinel_name = key_name.rsplit('/', 1)[-1]
                match = matcher(backup_sentinel_name)
                if match:
                    # TODO: It's necessary to use the name of the file to
                    # get the beginning wal segment information, whereas
                    # the ending information is encoded into the file
                    # itself.  Perhaps later on it should all be encoded
                    # into the name when the sentinel files are broken out
                    # into their own directory, so that S3 listing gets
                    # all commonly useful information without doing a
                    # request-per.
                    groups = match.groupdict()
                    info = storage.get_backup_info(
                        self.layout,
                        name='base_(unknown)_{offset}'.format(**groups),
                        last_modified=self.layout.key_last_modified(key),
                        wal_segment_backup_start=groups['filename'],
                        wal_segment_offset_backup_start=groups['offset'])

                    if self.detail:
                        try:
                            # This costs one web request
                            info.load_detail(self.conn)
                        except gevent.Timeout:
                            pass

                    yield info
class _DeleteFromContext(object):
    """Backend-agnostic deletion operators over a WAL-E storage layout.

    Subclasses must provide ``_backup_list`` and ``_container_name`` and
    set ``self.deleter``; this class implements the sweep / pattern-match
    / delete logic shared by all backends.
    """

    def __init__(self, conn, layout, dry_run):
        self.conn = conn
        self.dry_run = dry_run
        self.layout = layout
        # Must be set by subclass before any deletion entry point runs.
        self.deleter = None

        assert self.dry_run in (True, False)

    def _container_name(self, key):
        # Abstract: subclasses return the bucket/container holding *key*.
        pass

    def _maybe_delete_key(self, key, type_of_thing):
        """Delete *key*, or only log the would-be deletion when dry_run."""
        key_name = self.layout.key_name(key)
        url = '{scheme}://{bucket}/{name}'.format(
            scheme=self.layout.scheme, bucket=self._container_name(key),
            name=key_name)
        log_message = dict(
            msg='deleting {0}'.format(type_of_thing),
            detail='The key being deleted is {url}.'.format(url=url))

        if self.dry_run is False:
            logger.info(**log_message)
            self.deleter.delete(key)
        elif self.dry_run is True:
            log_message['hint'] = ('This is only a dry run -- no actual data '
                                   'is being deleted')
            logger.info(**log_message)
        else:
            assert False

    def _groupdict_to_segment_number(self, d):
        """Build a SegmentNumber from a regexp groupdict with 'log'/'seg'."""
        return storage.base.SegmentNumber(log=d['log'], seg=d['seg'])

    def _delete_if_before(self, delete_horizon_segment_number,
                          scanned_segment_number, key, type_of_thing):
        """Delete *key* when it is strictly older than the horizon."""
        if scanned_segment_number.as_an_integer < \
                delete_horizon_segment_number.as_an_integer:
            self._maybe_delete_key(key, type_of_thing)

    def _delete_base_backups_before(self, segment_info):
        """Delete base-backup data (sentinels, metadata, volumes) older
        than *segment_info*.  WAL files are not touched here."""
        base_backup_sentinel_depth = self.layout.basebackups().count('/') + 1
        version_depth = base_backup_sentinel_depth + 1
        volume_backup_depth = version_depth + 1

        # The base-backup sweep, deleting bulk data and metadata, but
        # not any wal files.
        for key in self._backup_list(prefix=self.layout.basebackups()):
            key_name = self.layout.key_name(key)
            url = '{scheme}://{bucket}/{name}'.format(
                scheme=self.layout.scheme, bucket=self._container_name(key),
                name=key_name)
            key_parts = key_name.split('/')
            key_depth = len(key_parts)

            if key_depth not in (base_backup_sentinel_depth, version_depth,
                                 volume_backup_depth):
                # Check depth (in terms of number of
                # slashes/delimiters in the key); if there exists a
                # key with an unexpected depth relative to the
                # context, complain a little bit and move on.
                logger.warning(
                    msg="skipping non-qualifying key in 'delete before'",
                    detail=(
                        'The unexpected key is "{0}", and it appears to be '
                        'at an unexpected depth.'.format(url)),
                    hint=generic_weird_key_hint_message)
            elif key_depth == base_backup_sentinel_depth:
                # This is a key at the base-backup-sentinel file
                # depth, so check to see if it matches the known form.
                match = re.match(storage.COMPLETE_BASE_BACKUP_REGEXP,
                                 key_parts[-1])

                if match is None:
                    # This key was at the level for a base backup
                    # sentinel, but doesn't match the known pattern.
                    # Complain about this, and move on.
                    logger.warning(
                        msg="skipping non-qualifying key in 'delete before'",
                        detail=('The unexpected key is "{0}", and it appears '
                                'not to match the base-backup sentinel '
                                'pattern.'.format(url)),
                        hint=generic_weird_key_hint_message)
                else:
                    # This branch actually might delete some data: the
                    # key is at the right level, and matches the right
                    # form.  The last check is to make sure it's in
                    # the range of things to delete, and if that is
                    # the case, attempt deletion.
                    assert match is not None
                    scanned_sn = \
                        self._groupdict_to_segment_number(match.groupdict())
                    self._delete_if_before(segment_info, scanned_sn, key,
                                           'a base backup sentinel file')
            elif key_depth == version_depth:
                match = re.match(
                    storage.BASE_BACKUP_REGEXP, key_parts[-2])

                if match is None or key_parts[-1] != 'extended_version.txt':
                    logger.warning(
                        msg="skipping non-qualifying key in 'delete before'",
                        detail=('The unexpected key is "{0}", and it appears '
                                'not to match the extended-version backup '
                                'pattern.'.format(url)),
                        hint=generic_weird_key_hint_message)
                else:
                    assert match is not None
                    scanned_sn = \
                        self._groupdict_to_segment_number(match.groupdict())
                    self._delete_if_before(segment_info, scanned_sn, key,
                                           'a extended version metadata file')
            elif key_depth == volume_backup_depth:
                # This has the depth of a base-backup volume, so try
                # to match the expected pattern and delete it if the
                # pattern matches and the base backup part qualifies
                # properly.
                assert len(key_parts) >= 2, ('must be a logical result of the '
                                             's3 storage layout')

                match = re.match(
                    storage.BASE_BACKUP_REGEXP, key_parts[-3])

                if match is None or key_parts[-2] != 'tar_partitions':
                    logger.warning(
                        msg="skipping non-qualifying key in 'delete before'",
                        detail=(
                            'The unexpected key is "{0}", and it appears '
                            'not to match the base-backup partition pattern.'
                            .format(url)),
                        hint=generic_weird_key_hint_message)
                else:
                    assert match is not None
                    scanned_sn = \
                        self._groupdict_to_segment_number(match.groupdict())
                    self._delete_if_before(segment_info, scanned_sn, key,
                                           'a base backup volume')
            else:
                assert False

    def _delete_wals_before(self, segment_info):
        """
        Delete all WAL files before segment_info.

        Doesn't delete any base-backup data.
        """
        wal_key_depth = self.layout.wal_directory().count('/') + 1
        for key in self._backup_list(prefix=self.layout.wal_directory()):
            key_name = self.layout.key_name(key)
            bucket = self._container_name(key)
            url = '{scm}://{bucket}/{name}'.format(scm=self.layout.scheme,
                                                   bucket=bucket,
                                                   name=key_name)
            key_parts = key_name.split('/')
            key_depth = len(key_parts)
            if key_depth != wal_key_depth:
                logger.warning(
                    msg="skipping non-qualifying key in 'delete before'",
                    detail=(
                        'The unexpected key is "{0}", and it appears to be '
                        'at an unexpected depth.'.format(url)),
                    hint=generic_weird_key_hint_message)
            elif key_depth == wal_key_depth:
                # NOTE(review): this elif is always true when reached (the
                # preceding branch handled depth mismatch), so it could be a
                # plain else.  Also, the dots in '.backup.lzo' below are
                # unescaped regex metacharacters — presumably intended as
                # literal dots; verify before tightening.
                segment_match = (re.match(storage.SEGMENT_REGEXP + r'\.lzo',
                                          key_parts[-1]))
                label_match = (re.match(storage.SEGMENT_REGEXP +
                                        r'\.[A-F0-9]{8,8}.backup.lzo',
                                        key_parts[-1]))
                history_match = re.match(r'[A-F0-9]{8,8}\.history',
                                         key_parts[-1])

                all_matches = [segment_match, label_match, history_match]
                non_matches = len(list(m for m in all_matches if m is None))

                # These patterns are intended to be mutually
                # exclusive, so either one should match or none should
                # match.
                assert non_matches in (len(all_matches) - 1, len(all_matches))
                if non_matches == len(all_matches):
                    logger.warning(
                        msg="skipping non-qualifying key in 'delete before'",
                        detail=('The unexpected key is "{0}", and it appears '
                                'not to match the WAL file naming pattern.'
                                .format(url)),
                        hint=generic_weird_key_hint_message)
                elif segment_match is not None:
                    scanned_sn = self._groupdict_to_segment_number(
                        segment_match.groupdict())
                    self._delete_if_before(segment_info, scanned_sn, key,
                                           'a wal file')
                elif label_match is not None:
                    scanned_sn = self._groupdict_to_segment_number(
                        label_match.groupdict())
                    self._delete_if_before(segment_info, scanned_sn, key,
                                           'a backup history file')
                elif history_match is not None:
                    # History (timeline) files do not have any actual
                    # WAL position information, so they are never
                    # deleted.
                    pass
                else:
                    assert False
            else:
                assert False

    def delete_everything(self):
        """Delete everything in a storage layout

        Named provocatively for a reason: can (and in fact intended
        to) cause irrecoverable loss of data.  This can be used to:

        * Completely obliterate data from old WAL-E versions
          (i.e. layout.VERSION is an obsolete version)

        * Completely obliterate all backups (from a decommissioned
          database, for example)

        """
        for k in self._backup_list(prefix=self.layout.basebackups()):
            self._maybe_delete_key(k, 'part of a base backup')

        for k in self._backup_list(prefix=self.layout.wal_directory()):
            self._maybe_delete_key(k, 'part of wal logs')

        if self.deleter:
            self.deleter.close()

    def delete_before(self, segment_info):
        """
        Delete all base backups and WAL before a given segment

        This is the most commonly-used deletion operator; to delete
        old backups and WAL.

        """
        # This will delete all base backup data before segment_info.
        self._delete_base_backups_before(segment_info)

        # This will delete all WAL segments before segment_info.
        self._delete_wals_before(segment_info)

        if self.deleter:
            self.deleter.close()

    def delete_with_retention(self, num_to_retain):
        """
        Retain the num_to_retain most recent backups and delete all data
        before them.

        """
        base_backup_sentinel_depth = self.layout.basebackups().count('/') + 1

        # Sweep over base backup files, collecting sentinel files from
        # completed backups.
        completed_basebackups = []
        for key in self._backup_list(prefix=self.layout.basebackups()):
            key_name = self.layout.key_name(key)
            key_parts = key_name.split('/')
            key_depth = len(key_parts)
            url = '{scheme}://{bucket}/{name}'.format(
                scheme=self.layout.scheme,
                bucket=self._container_name(key),
                name=key_name)

            if key_depth == base_backup_sentinel_depth:
                # This is a key at the depth of a base-backup-sentinel file.
                # Check to see if it matches the known form.
                match = re.match(storage.COMPLETE_BASE_BACKUP_REGEXP,
                                 key_parts[-1])

                # If this isn't a base-backup-sentinel file, just ignore it.
                if match is None:
                    continue

                # This key corresponds to a base-backup-sentinel file and
                # represents a completed backup.  Grab its segment number.
                scanned_sn = \
                    self._groupdict_to_segment_number(match.groupdict())
                completed_basebackups.append(dict(
                    scanned_sn=scanned_sn,
                    url=url))

        # Sort the base backups from newest to oldest.
        basebackups = sorted(
            completed_basebackups,
            key=lambda backup: backup['scanned_sn'].as_an_integer,
            reverse=True)
        last_retained = None
        if len(basebackups) <= num_to_retain:
            # Fewer backups than the retention target: keep everything.
            detail = None
            if len(basebackups) == 0:
                msg = 'Not deleting any data.'
                detail = 'No existing base backups.'
            elif len(basebackups) == 1:
                last_retained = basebackups[-1]
                msg = 'Retaining existing base backup.'
            else:
                last_retained = basebackups[-1]
                msg = "Retaining all %d base backups." % len(basebackups)
        else:
            # The oldest retained backup becomes the deletion horizon.
            last_retained = basebackups[num_to_retain - 1]
            num_deleting = len(basebackups) - num_to_retain
            msg = "Deleting %d oldest base backups." % num_deleting
            detail = "Found %d total base backups." % len(basebackups)

        log_message = dict(msg=msg)
        if detail is not None:
            log_message['detail'] = detail
        if last_retained is not None:
            log_message['hint'] = \
                "Deleting keys older than %s." % last_retained['url']
        logger.info(**log_message)

        # This will delete all base backup and WAL data before
        # last_retained['scanned_sn'].
        if last_retained is not None:
            self._delete_base_backups_before(last_retained['scanned_sn'])
            self._delete_wals_before(last_retained['scanned_sn'])

        if self.deleter:
            self.deleter.close()
| |
__author__ = 'arobres'
# -*- coding: utf-8 -*-
# NOTE(review): a PEP 263 coding cookie is only honoured on line 1 or 2 of a
# file; confirm this really is the top of the module so it takes effect.
from lettuce import step, world, before, after
from commons.authentication import get_token
from commons.rest_utils import RestUtils
from commons.product_body import default_product, create_default_metadata_list, create_default_attribute_list
from commons.utils import dict_to_xml, set_default_headers, xml_to_dict, response_body_to_dict, \
replace_none_value_metadata_to_empty_string
from commons.constants import CONTENT_TYPE, CONTENT_TYPE_JSON, PRODUCT_NAME, ACCEPT_HEADER, AUTH_TOKEN_HEADER, PRODUCT, \
PRODUCT_DESCRIPTION, PRODUCT_ATTRIBUTES, PRODUCT_METADATAS, DEFAULT_METADATA, METADATA, ATTRIBUTE
from nose.tools import assert_equals, assert_true, assert_in
# Shared REST client used by every step in this module.
api_utils = RestUtils()
@before.each_feature
def setup_feature(feature):
    """Fetch a fresh auth token/tenant pair once per feature."""
    token, tenant = get_token()
    world.token_id = token
    world.tenant_id = tenant
@before.each_scenario
def setup_scenario(scenario):
    """Reset default headers, wipe testing products and clear scenario state."""
    headers = set_default_headers(world.token_id, world.tenant_id)
    world.headers = headers
    api_utils.delete_all_testing_products(headers)
    world.metadatas = None
    world.attributes = None
@before.outline
def setup_outline(param1, param2, param3, param4):
    # Lettuce passes four positional arguments to outline hooks; none are
    # needed here -- we only want the same reset performed per scenario.
    setup_scenario(None)
@step(u'Given a created product with name "([^"]*)"')
def given_a_created_product_with_name_group1(step, product_id):
    """Create a bare product through the API and remember its id."""
    product = default_product(name=product_id)
    world.created_product_body = product
    payload = dict_to_xml(product)
    response = api_utils.add_new_product(body=payload, headers=world.headers)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
@step(u'Given a created product with attributes and name "([^"]*)"')
def given_a_created_product_with_attributes_and_name_group1(step, product_id):
    """Create a product carrying two default attributes."""
    world.attributes = create_default_attribute_list(2)
    product = default_product(name=product_id, attributes=world.attributes)
    world.created_product_body = product
    payload = dict_to_xml(product)
    response = api_utils.add_new_product(body=payload, headers=world.headers)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
@step(u'Given a created product with metadatas and name "([^"]*)"')
def given_a_created_product_with_metadatas_and_name_group1(step, product_id):
    """Create a product carrying two default metadatas.

    Renamed from ``given_a_created_product_with_attributes_and_name_group1``,
    which duplicated (and shadowed at module level) the attributes step
    above. Lettuce binds steps by their regexp, not the function name, so
    the rename is safe for feature files.
    """
    world.metadatas = create_default_metadata_list(2)
    world.created_product_body = default_product(name=product_id, metadata=world.metadatas)
    body = dict_to_xml(world.created_product_body)
    response = api_utils.add_new_product(headers=world.headers, body=body)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
@step(u'Given a created product with all data and name "([^"]*)"')
def given_a_created_product_with_all_data_and_name_group1(step, product_id):
    """Create a product with five metadatas and five attributes."""
    world.metadatas = create_default_metadata_list(5)
    world.attributes = create_default_attribute_list(5)
    product = default_product(name=product_id, metadata=world.metadatas,
                              attributes=world.attributes)
    world.created_product_body = product
    payload = dict_to_xml(product)
    response = api_utils.add_new_product(body=payload, headers=world.headers)
    assert_true(response.ok, response.content)
    world.product_id = response.json()[PRODUCT_NAME]
@step(u'When I retrieve the product "([^"]*)" with accept parameter "([^"]*)" response')
def when_i_retrieve_the_product_group1_with_accept_parameter_group2_response(step, product_id, accept_content):
    """GET the product using the requested Accept content type."""
    world.headers[ACCEPT_HEADER] = accept_content
    world.response = api_utils.retrieve_product(product_id=product_id,
                                                headers=world.headers)
@step(u'When I retrieve the product attributes "([^"]*)" with accept parameter "([^"]*)" response')
def retrieve_product_attributes(step, product_id, accept_content):
    """GET only the product's attributes using the requested Accept type."""
    world.headers[ACCEPT_HEADER] = accept_content
    world.response = api_utils.retrieve_product_attributes(product_id=product_id,
                                                           headers=world.headers)
@step(u'When I retrieve the product metadatas "([^"]*)" with accept parameter "([^"]*)" response')
def retrieve_product_metadatas(step, product_id, accept_content):
    """GET only the product's metadatas using the requested Accept type."""
    world.headers[ACCEPT_HEADER] = accept_content
    world.response = api_utils.retrieve_product_metadatas(product_id=product_id,
                                                          headers=world.headers)
@step(u'Then the product is retrieved')
def then_the_product_is_retrieved(step):
    """Check the stored response matches the product created earlier.

    Decodes the body from JSON or XML depending on the response
    Content-Type, then compares name, description and (when the scenario
    set them) attributes/metadatas.
    """
    assert_true(world.response.ok, world.response.content)
    response_headers = world.response.headers
    if response_headers[CONTENT_TYPE] == CONTENT_TYPE_JSON:
        try:
            response_body = world.response.json()
        except Exception as e:
            # Re-raise instead of swallowing: previously this only printed
            # the error, leaving `response_body` unbound so the assertions
            # below failed with a confusing NameError. (`except Exception, e`
            # was also Python-2-only syntax.)
            print(str(e))
            raise
    else:
        response_body = xml_to_dict(world.response.content)[PRODUCT]
    assert_equals(response_body[PRODUCT_NAME], world.created_product_body[PRODUCT][PRODUCT_NAME])
    assert_equals(response_body[PRODUCT_DESCRIPTION], world.created_product_body[PRODUCT][PRODUCT_DESCRIPTION])
    if world.attributes is not None:
        assert_equals(world.created_product_body[PRODUCT][PRODUCT_ATTRIBUTES], response_body[PRODUCT_ATTRIBUTES])
        world.attributes = None
    if world.metadatas is not None:
        for metadata in world.created_product_body[PRODUCT][PRODUCT_METADATAS]:
            assert_in(metadata, response_body[PRODUCT_METADATAS])
        world.metadatas = None
@step(u'Then I obtain an "([^"]*)"')
def then_i_obtain_an_group1(step, error_code):
    """Assert the response status code, then restore the default headers."""
    actual_code = str(world.response.status_code)
    assert_equals(actual_code, error_code)
    world.headers = set_default_headers(world.token_id, world.tenant_id)
@step(u'Then the attributes product are empty')
def then_the_attributes_product_are_empty(step):
    """Assert the product exposes no attributes in the response body."""
    # A single assert carrying the response content is enough; the previous
    # duplicate bare assert_true(world.response.ok) added no information.
    assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response.content))
    response_body = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                          xml_root_element_name=PRODUCT_ATTRIBUTES, is_list=True)
    assert_true(response_body is None or len(response_body) == 0)
@step(u'Then the attributes product are retrieved')
def then_the_attributes_product_are_retrieved(step):
    """Assert the returned attributes equal the ones used at creation."""
    assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response.content))
    expected = world.created_product_body[PRODUCT][PRODUCT_ATTRIBUTES]
    actual = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                   xml_root_element_name=PRODUCT_ATTRIBUTES, is_list=True)
    assert_equals(expected, actual)
@step(u'Then the metadatas product contain default metadatas')
def then_the_metadatas_product_contain_default_metadatas(step):
    """Assert the product carries the default metadatas plus 'tenant_id'."""
    assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response.content))
    actual_metadatas = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                             xml_root_element_name=PRODUCT_METADATAS, is_list=True)
    assert_equals(len(actual_metadatas), 6)
    # The service adds a 'tenant_id' metadata on top of the defaults.
    expected_metadatas = list(DEFAULT_METADATA[METADATA])
    expected_metadatas.append({"key": "tenant_id", "value": world.tenant_id})
    # Workaround: xmldict parses empty values as None.
    replace_none_value_metadata_to_empty_string(actual_metadatas)
    assert_equals(actual_metadatas, expected_metadatas)
@step(u'Then the metadatas product are retrieved')
def then_the_metadatas_product_are_retrieved(step):
    """Assert every metadata sent at creation is present in the response."""
    assert_true(world.response.ok, 'RESPONSE: {}'.format(world.response.content))
    actual_metadatas = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER],
                                             xml_root_element_name=PRODUCT_METADATAS, is_list=True)
    if world.metadatas is not None:
        for metadata in world.created_product_body[PRODUCT][PRODUCT_METADATAS]:
            assert_in(metadata, actual_metadatas)
        world.metadatas = None
@step(u'And incorrect "([^"]*)" header')
def and_incorrect_content_type_header(step, content_type):
    # Overwrite the Content-Type header with the (intentionally bad) value
    # supplied by the scenario outline.
    world.headers[CONTENT_TYPE] = content_type
@step(u'And incorrect "([^"]*)" authentication')
def incorrect_token(step, new_token):
    # Replace the auth token header with the supplied (invalid) value.
    world.headers[AUTH_TOKEN_HEADER] = new_token
@after.all
def tear_down(scenario):
    """Refresh credentials and remove any remaining testing products."""
    world.token_id, world.tenant_id = get_token()
    headers = set_default_headers(world.token_id, world.tenant_id)
    world.headers = headers
    api_utils.delete_all_testing_products(headers)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN classifier_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import tempfile
from absl.testing import parameterized
import numpy as np
from scipy import linalg as scp_linalg
from google.protobuf import text_format
from tensorflow.contrib.gan.python.eval.python import classifier_metrics_impl as classifier_metrics
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Alias the mock module bundled with tensorflow.python.platform.test.
mock = test.mock
def _numpy_softmax(x):
e_x = np.exp(x - np.max(x, axis=1)[:, None])
return e_x / np.sum(e_x, axis=1)[:, None]
def _expected_inception_score(logits):
  """Reference inception score: exp(mean_x KL(p(y|x) || p(y)))."""
  probs = _numpy_softmax(logits)
  marginal = np.expand_dims(np.mean(probs, 0), 0)
  kl_per_example = np.sum(probs * (np.log(probs) - np.log(marginal)), 1)
  return np.exp(np.mean(kl_per_example))
def _expected_mean_only_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
mean = np.square(m - m_v).sum()
mofid = mean
return mofid
def _expected_diagonal_only_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
var = np.var(real_imgs, axis=0)
var_v = np.var(gen_imgs, axis=0)
sqcc = np.sqrt(var * var_v)
mean = (np.square(m - m_v)).sum()
trace = (var + var_v - 2 * sqcc).sum()
dofid = mean + trace
return dofid
def _expected_fid(real_imgs, gen_imgs):
m = np.mean(real_imgs, axis=0)
m_v = np.mean(gen_imgs, axis=0)
sigma = np.cov(real_imgs, rowvar=False)
sigma_v = np.cov(gen_imgs, rowvar=False)
sqcc = scp_linalg.sqrtm(np.dot(sigma, sigma_v))
mean = np.square(m - m_v).sum()
trace = np.trace(sigma + sigma_v - 2 * sqcc)
fid = mean + trace
return fid
def _expected_trace_sqrt_product(sigma, sigma_v):
return np.trace(scp_linalg.sqrtm(np.dot(sigma, sigma_v)))
def _expected_kid_and_std(real_imgs, gen_imgs, max_block_size=1024):
n_r, dim = real_imgs.shape
n_g = gen_imgs.shape[0]
n_blocks = int(np.ceil(max(n_r, n_g) / max_block_size))
sizes_r = np.full(n_blocks, n_r // n_blocks)
to_patch = n_r - n_blocks * (n_r // n_blocks)
if to_patch > 0:
sizes_r[-to_patch:] += 1
inds_r = np.r_[0, np.cumsum(sizes_r)]
assert inds_r[-1] == n_r
sizes_g = np.full(n_blocks, n_g // n_blocks)
to_patch = n_g - n_blocks * (n_g // n_blocks)
if to_patch > 0:
sizes_g[-to_patch:] += 1
inds_g = np.r_[0, np.cumsum(sizes_g)]
assert inds_g[-1] == n_g
ests = []
for i in range(n_blocks):
r = real_imgs[inds_r[i]:inds_r[i + 1]]
g = gen_imgs[inds_g[i]:inds_g[i + 1]]
k_rr = (np.dot(r, r.T) / dim + 1)**3
k_rg = (np.dot(r, g.T) / dim + 1)**3
k_gg = (np.dot(g, g.T) / dim + 1)**3
ests.append(-2 * k_rg.mean() +
k_rr[np.triu_indices_from(k_rr, k=1)].mean() +
k_gg[np.triu_indices_from(k_gg, k=1)].mean())
var = np.var(ests, ddof=1) if len(ests) > 1 else np.nan
return np.mean(ests), np.sqrt(var / len(ests))
# A dummy GraphDef string with the minimum number of Ops.
graphdef_string = """
node {
name: "Mul"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 299
}
dim {
size: 299
}
dim {
size: 3
}
}
}
}
}
node {
name: "logits"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 1001
}
}
}
}
}
node {
name: "pool_3"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "shape"
value {
shape {
dim {
size: -1
}
dim {
size: 2048
}
}
}
}
}
versions {
producer: 24
}
"""
def _get_dummy_graphdef():
  """Parse `graphdef_string` into a fresh GraphDef proto."""
  graphdef = graph_pb2.GraphDef()
  text_format.Merge(graphdef_string, graphdef)
  return graphdef
def _run_with_mock(function, *args, **kwargs):
  """Call `function` with the inception tarball download mocked out."""
  patcher = mock.patch.object(
      classifier_metrics, 'get_graph_def_from_url_tarball')
  with patcher as mock_tarball_getter:
    mock_tarball_getter.return_value = _get_dummy_graphdef()
    return function(*args, **kwargs)
class ClassifierMetricsTest(test.TestCase, parameterized.TestCase):
  """Tests for TF-GAN classifier metrics (inception score, FID, KID).

  Graph-construction tests only check tensor types/shapes; value tests
  compare the TF ops against the numpy reference implementations defined
  above. All tests mock out the inception tarball download and substitute
  the dummy GraphDef, so no network access or real model is needed.
  """

  @parameterized.named_parameters(
      ('GraphDef', False),
      ('DefaultGraphDefFn', True))
  def test_run_inception_graph(self, use_default_graph_def):
    """Test `run_inception` graph construction."""
    batch_size = 7
    img = array_ops.ones([batch_size, 299, 299, 3])
    if use_default_graph_def:
      logits = _run_with_mock(classifier_metrics.run_inception, img)
    else:
      logits = classifier_metrics.run_inception(img, _get_dummy_graphdef())
    self.assertIsInstance(logits, ops.Tensor)
    logits.shape.assert_is_compatible_with([batch_size, 1001])
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())

  @parameterized.named_parameters(
      ('GraphDef', False),
      ('DefaultGraphDefFn', True))
  def test_run_inception_graph_pool_output(self, use_default_graph_def):
    """Test `run_inception` graph construction with pool output."""
    batch_size = 3
    img = array_ops.ones([batch_size, 299, 299, 3])
    if use_default_graph_def:
      pool = _run_with_mock(
          classifier_metrics.run_inception,
          img,
          output_tensor=classifier_metrics.INCEPTION_FINAL_POOL)
    else:
      pool = classifier_metrics.run_inception(
          img, _get_dummy_graphdef(),
          output_tensor=classifier_metrics.INCEPTION_FINAL_POOL)
    self.assertIsInstance(pool, ops.Tensor)
    pool.shape.assert_is_compatible_with([batch_size, 2048])
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())

  def test_run_inception_multiple_outputs(self):
    """Test `run_inception` graph construction with multiple outputs."""
    batch_size = 3
    img = array_ops.ones([batch_size, 299, 299, 3])
    logits, pool = _run_with_mock(
        classifier_metrics.run_inception,
        img,
        output_tensor=[
            classifier_metrics.INCEPTION_OUTPUT,
            classifier_metrics.INCEPTION_FINAL_POOL
        ])
    self.assertIsInstance(logits, ops.Tensor)
    self.assertIsInstance(pool, ops.Tensor)
    logits.shape.assert_is_compatible_with([batch_size, 1001])
    pool.shape.assert_is_compatible_with([batch_size, 2048])
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())

  def test_inception_score_graph(self):
    """Test `inception_score` graph construction."""
    score = _run_with_mock(
        classifier_metrics.inception_score,
        array_ops.zeros([6, 299, 299, 3]),
        num_batches=3)
    self.assertIsInstance(score, ops.Tensor)
    score.shape.assert_has_rank(0)
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())

  def test_frechet_inception_distance_graph(self):
    """Test `frechet_inception_distance` graph construction."""
    img = array_ops.ones([7, 299, 299, 3])
    distance = _run_with_mock(
        classifier_metrics.frechet_inception_distance, img, img)
    self.assertIsInstance(distance, ops.Tensor)
    distance.shape.assert_has_rank(0)
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())

  def test_kernel_inception_distance_graph(self):
    """Test `kernel_inception_distance` graph construction."""
    img = array_ops.ones([7, 299, 299, 3])
    distance = _run_with_mock(classifier_metrics.kernel_inception_distance, img,
                              img)
    self.assertIsInstance(distance, ops.Tensor)
    distance.shape.assert_has_rank(0)
    # Check that none of the model variables are trainable.
    self.assertListEqual([], variables.trainable_variables())

  def test_run_inception_multicall(self):
    """Test that `run_inception` can be called multiple times."""
    for batch_size in (7, 3, 2):
      img = array_ops.ones([batch_size, 299, 299, 3])
      _run_with_mock(classifier_metrics.run_inception, img)

  def test_invalid_input(self):
    """Test that functions properly fail on invalid input."""
    with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
      classifier_metrics.run_inception(array_ops.ones([7, 50, 50, 3]))
    p = array_ops.zeros([8, 10])
    p_logits = array_ops.zeros([8, 10])
    q = array_ops.zeros([10])
    with self.assertRaisesRegexp(ValueError, 'must be floating type'):
      classifier_metrics._kl_divergence(
          array_ops.zeros([8, 10], dtype=dtypes.int32), p_logits, q)
    with self.assertRaisesRegexp(ValueError, 'must be floating type'):
      classifier_metrics._kl_divergence(p,
                                        array_ops.zeros(
                                            [8, 10], dtype=dtypes.int32), q)
    with self.assertRaisesRegexp(ValueError, 'must be floating type'):
      classifier_metrics._kl_divergence(p, p_logits,
                                        array_ops.zeros(
                                            [10], dtype=dtypes.int32))
    with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
      classifier_metrics._kl_divergence(array_ops.zeros([8]), p_logits, q)
    with self.assertRaisesRegexp(ValueError, 'must have rank 2'):
      classifier_metrics._kl_divergence(p, array_ops.zeros([8]), q)
    with self.assertRaisesRegexp(ValueError, 'must have rank 1'):
      classifier_metrics._kl_divergence(p, p_logits, array_ops.zeros([10, 8]))

  def test_inception_score_value(self):
    """Test that `inception_score` gives the correct value."""
    logits = np.array(
        [np.array([1, 2] * 500 + [4]),
         np.array([4, 5] * 500 + [6])])
    unused_image = array_ops.zeros([2, 299, 299, 3])
    incscore = _run_with_mock(classifier_metrics.inception_score, unused_image)
    with self.test_session(use_gpu=True) as sess:
      # Feed the logits directly into the dummy graph's concat output.
      incscore_np = sess.run(incscore, {'concat:0': logits})
    self.assertAllClose(_expected_inception_score(logits), incscore_np)

  def test_mean_only_frechet_classifier_distance_value(self):
    """Test that `frechet_classifier_distance` gives the correct value."""
    np.random.seed(0)
    pool_real_a = np.float32(np.random.randn(256, 2048))
    pool_gen_a = np.float32(np.random.randn(256, 2048))
    tf_pool_real_a = array_ops.constant(pool_real_a)
    tf_pool_gen_a = array_ops.constant(pool_gen_a)
    mofid_op = classifier_metrics.mean_only_frechet_classifier_distance_from_activations(  # pylint: disable=line-too-long
        tf_pool_real_a, tf_pool_gen_a)
    with self.cached_session() as sess:
      actual_mofid = sess.run(mofid_op)
    expected_mofid = _expected_mean_only_fid(pool_real_a, pool_gen_a)
    self.assertAllClose(expected_mofid, actual_mofid, 0.0001)

  def test_diagonal_only_frechet_classifier_distance_value(self):
    """Test that `frechet_classifier_distance` gives the correct value."""
    np.random.seed(0)
    pool_real_a = np.float32(np.random.randn(256, 2048))
    pool_gen_a = np.float32(np.random.randn(256, 2048))
    tf_pool_real_a = array_ops.constant(pool_real_a)
    tf_pool_gen_a = array_ops.constant(pool_gen_a)
    dofid_op = classifier_metrics.diagonal_only_frechet_classifier_distance_from_activations(  # pylint: disable=line-too-long
        tf_pool_real_a, tf_pool_gen_a)
    with self.cached_session() as sess:
      actual_dofid = sess.run(dofid_op)
    expected_dofid = _expected_diagonal_only_fid(pool_real_a, pool_gen_a)
    self.assertAllClose(expected_dofid, actual_dofid, 0.0001)

  def test_frechet_classifier_distance_value(self):
    """Test that `frechet_classifier_distance` gives the correct value."""
    np.random.seed(0)
    # Make num_examples > num_features to ensure scipy's sqrtm function
    # doesn't return a complex matrix.
    test_pool_real_a = np.float32(np.random.randn(512, 256))
    test_pool_gen_a = np.float32(np.random.randn(512, 256))
    fid_op = _run_with_mock(
        classifier_metrics.frechet_classifier_distance,
        test_pool_real_a,
        test_pool_gen_a,
        classifier_fn=lambda x: x)
    with self.cached_session() as sess:
      actual_fid = sess.run(fid_op)
    expected_fid = _expected_fid(test_pool_real_a, test_pool_gen_a)
    self.assertAllClose(expected_fid, actual_fid, 0.0001)

  def test_frechet_classifier_distance_covariance(self):
    """Test that `frechet_classifier_distance` takes covariance into account."""
    np.random.seed(0)
    # Make num_examples > num_features to ensure scipy's sqrtm function
    # doesn't return a complex matrix.
    test_pool_reals, test_pool_gens = [], []
    for i in range(1, 11, 2):
      test_pool_reals.append(np.float32(np.random.randn(2048, 256) * i))
      test_pool_gens.append(np.float32(np.random.randn(2048, 256) * i))
    fid_ops = []
    for i in range(len(test_pool_reals)):
      fid_ops.append(_run_with_mock(
          classifier_metrics.frechet_classifier_distance,
          test_pool_reals[i],
          test_pool_gens[i],
          classifier_fn=lambda x: x))
    fids = []
    with self.cached_session() as sess:
      for fid_op in fid_ops:
        fids.append(sess.run(fid_op))
    # Check that the FIDs increase monotonically.
    self.assertTrue(all(fid_a < fid_b for fid_a, fid_b in zip(fids, fids[1:])))

  def test_kernel_classifier_distance_value(self):
    """Test that `kernel_classifier_distance` gives the correct value."""
    np.random.seed(0)
    test_pool_real_a = np.float32(np.random.randn(512, 256))
    test_pool_gen_a = np.float32(np.random.randn(512, 256) * 1.1 + .05)
    kid_op = _run_with_mock(
        classifier_metrics.kernel_classifier_distance_and_std,
        test_pool_real_a,
        test_pool_gen_a,
        classifier_fn=lambda x: x,
        max_block_size=600)
    with self.test_session() as sess:
      actual_kid, actual_std = sess.run(kid_op)
    expected_kid, expected_std = _expected_kid_and_std(test_pool_real_a,
                                                       test_pool_gen_a)
    self.assertAllClose(expected_kid, actual_kid, 0.001)
    self.assertAllClose(expected_std, actual_std, 0.001)

  def test_kernel_classifier_distance_block_sizes(self):
    """Test that `kernel_classifier_distance` works with unusual max_block_size
    values.
    """
    np.random.seed(0)
    test_pool_real_a = np.float32(np.random.randn(512, 256))
    test_pool_gen_a = np.float32(np.random.randn(768, 256) * 1.1 + .05)
    max_block_size = array_ops.placeholder(dtypes.int32, shape=())
    kid_op = _run_with_mock(
        classifier_metrics.kernel_classifier_distance_and_std_from_activations,
        array_ops.constant(test_pool_real_a),
        array_ops.constant(test_pool_gen_a),
        max_block_size=max_block_size)
    for block_size in [50, 512, 1000]:
      with self.test_session() as sess:
        actual_kid, actual_std = sess.run(kid_op, {max_block_size: block_size})
      expected_kid, expected_std = _expected_kid_and_std(
          test_pool_real_a, test_pool_gen_a, max_block_size=block_size)
      self.assertAllClose(expected_kid, actual_kid, 0.001)
      self.assertAllClose(expected_std, actual_std, 0.001)

  def test_trace_sqrt_product_value(self):
    """Test that `trace_sqrt_product` gives the correct value."""
    np.random.seed(0)
    # Make num_examples > num_features to ensure scipy's sqrtm function
    # doesn't return a complex matrix.
    test_pool_real_a = np.float32(np.random.randn(512, 256))
    test_pool_gen_a = np.float32(np.random.randn(512, 256))
    cov_real = np.cov(test_pool_real_a, rowvar=False)
    cov_gen = np.cov(test_pool_gen_a, rowvar=False)
    trace_sqrt_prod_op = _run_with_mock(classifier_metrics.trace_sqrt_product,
                                        cov_real, cov_gen)
    with self.cached_session() as sess:
      # trace_sqrt_product: tsp
      actual_tsp = sess.run(trace_sqrt_prod_op)
    expected_tsp = _expected_trace_sqrt_product(cov_real, cov_gen)
    self.assertAllClose(actual_tsp, expected_tsp, 0.01)

  def test_preprocess_image_graph(self):
    """Test `preprocess_image` graph construction."""
    incorrectly_sized_image = array_ops.zeros([520, 240, 3])
    correct_image = classifier_metrics.preprocess_image(
        images=incorrectly_sized_image)
    _run_with_mock(classifier_metrics.run_inception,
                   array_ops.expand_dims(correct_image, 0))

  def test_get_graph_def_from_url_tarball(self):
    """Test `get_graph_def_from_url_tarball`."""
    # Write dummy binary GraphDef to tempfile.
    with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
      tmp_file.write(_get_dummy_graphdef().SerializeToString())
    relative_path = os.path.relpath(tmp_file.name)
    # Create gzip tarball.
    tar_dir = tempfile.mkdtemp()
    tar_filename = os.path.join(tar_dir, 'tmp.tar.gz')
    with tarfile.open(tar_filename, 'w:gz') as tar:
      tar.add(relative_path)
    # Patch urllib so the 'download' just returns the local tarball.
    with mock.patch.object(classifier_metrics, 'urllib') as mock_urllib:
      mock_urllib.request.urlretrieve.return_value = tar_filename, None
      graph_def = classifier_metrics.get_graph_def_from_url_tarball(
          'unused_url', relative_path)
    self.assertIsInstance(graph_def, graph_pb2.GraphDef)
    self.assertEqual(_get_dummy_graphdef(), graph_def)
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  test.main()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import flask_login
from flask import redirect, request, url_for
# Need to expose these downstream
# flake8: noqa: F401
from flask_login import current_user, login_required, login_user, logout_user
from flask_oauthlib.client import OAuth
from airflow import models
from airflow.configuration import AirflowConfigException, conf
from airflow.utils.session import provide_session
# Module-level logger for this auth backend.
log = logging.getLogger(__name__)
def get_config_param(param):
    """Read `param` from the [github_enterprise] config section as a str."""
    value = conf.get('github_enterprise', param)
    return str(value)
class GHEUser(models.User):
    """Adapter exposing the flask_login interface over an Airflow user row."""

    def __init__(self, user):
        # NOTE: deliberately does not call models.User.__init__; the wrapped
        # record is stored as-is and only delegated to for its id.
        self.user = user

    def get_id(self):
        """Returns the current user id as required by flask_login"""
        return self.user.get_id()

    @property
    def is_active(self):
        """Required by flask_login"""
        return True

    @property
    def is_authenticated(self):
        """Required by flask_login"""
        return True

    @property
    def is_anonymous(self):
        """Required by flask_login"""
        return False

    def data_profiling(self):
        """Provides access to data profiling tools"""
        return True

    def is_superuser(self):
        """Access all the things"""
        return True
class AuthenticationError(Exception):
    """Raised when the GHE OAuth flow fails or returns a bad response."""
    pass
class GHEAuthBackend:
    """Airflow auth backend logging users in via GitHub (Enterprise) OAuth."""

    def __init__(self):
        self.ghe_host = get_config_param('host')
        self.login_manager = flask_login.LoginManager()
        self.login_manager.login_view = 'airflow.login'
        # Populated lazily by init_app() / ghe_api_route().
        self.flask_app = None
        self.ghe_oauth = None
        self.api_url = None

    def ghe_api_route(self, leaf):
        """Return the full GHE API URL for ``leaf`` (e.g. '/user').

        The base URL is computed once and cached: public github.com uses
        api.github.com, while an enterprise host serves the API under
        https://<host>/api/<api_rev>.
        """
        if not self.api_url:
            self.api_url = (
                'https://api.github.com' if self.ghe_host == 'github.com'
                else '/'.join(['https:/',
                               self.ghe_host,
                               'api',
                               get_config_param('api_rev')])
            )
        return self.api_url + leaf

    def init_app(self, flask_app):
        """Wire the login manager, the OAuth remote app and the callback route."""
        self.flask_app = flask_app
        self.login_manager.init_app(self.flask_app)
        self.ghe_oauth = OAuth(self.flask_app).remote_app(
            'ghe',
            consumer_key=get_config_param('client_id'),
            consumer_secret=get_config_param('client_secret'),
            # need read:org to get team member list
            request_token_params={'scope': 'user:email,read:org'},
            base_url=self.ghe_host,
            request_token_url=None,
            access_token_method='POST',
            access_token_url=''.join(['https://',
                                      self.ghe_host,
                                      '/login/oauth/access_token']),
            authorize_url=''.join(['https://',
                                   self.ghe_host,
                                   '/login/oauth/authorize']))
        self.login_manager.user_loader(self.load_user)
        self.flask_app.add_url_rule(get_config_param('oauth_callback_route'),
                                    'ghe_oauth_callback',
                                    self.oauth_callback)

    def login(self, request):
        """Redirect the user to the GHE authorize URL, carrying the next URL
        in the OAuth ``state`` parameter so the callback can restore it."""
        log.debug('Redirecting user to GHE login')
        return self.ghe_oauth.authorize(callback=url_for(
            'ghe_oauth_callback',
            _external=True),
            state=request.args.get('next') or request.referrer or None)

    def get_ghe_user_profile_info(self, ghe_token):
        """Fetch (login, email) for the token's user; raise on non-200."""
        resp = self.ghe_oauth.get(self.ghe_api_route('/user'),
                                  token=(ghe_token, ''))
        if not resp or resp.status != 200:
            raise AuthenticationError(
                'Failed to fetch user profile, status ({0})'.format(
                    resp.status if resp else 'None'))
        return resp.data['login'], resp.data['email']

    def ghe_team_check(self, username, ghe_token):
        """Return True iff the user may log in based on configured team ids.

        When `allowed_teams` is unset, everyone with a GHE account is
        accepted. Raises AuthenticationError on a bad API response and
        ValueError when team names (not ids) are configured.
        """
        try:
            # the response from ghe returns the id of the team as an integer
            try:
                allowed_teams = [int(team.strip())
                                 for team in
                                 get_config_param('allowed_teams').split(',')]
            except ValueError:
                # this is to deprecate using the string name for a team
                raise ValueError(
                    'it appears that you are using the string name for a team, '
                    'please use the id number instead')
        except AirflowConfigException:
            # No allowed teams defined, let anyone in GHE in.
            return True
        # https://developer.github.com/v3/orgs/teams/#list-user-teams
        resp = self.ghe_oauth.get(self.ghe_api_route('/user/teams'),
                                  token=(ghe_token, ''))
        if not resp or resp.status != 200:
            raise AuthenticationError(
                'Bad response from GHE ({0})'.format(
                    resp.status if resp else 'None'))
        for team in resp.data:
            # mylons: previously this line used to be if team['slug'] in teams
            # however, teams are part of organizations. organizations are unique,
            # but teams are not therefore 'slug' for a team is not necessarily unique.
            # use id instead
            if team['id'] in allowed_teams:
                return True
        log.debug('Denying access for user "%s", not a member of "%s"',
                  username,
                  str(allowed_teams))
        return False

    @provide_session
    def load_user(self, userid, session=None):
        """flask_login user loader: look the user up by primary key."""
        if not userid or userid == 'None':
            return None
        user = session.query(models.User).filter(
            models.User.id == int(userid)).first()
        return GHEUser(user)

    @provide_session
    def oauth_callback(self, session=None):
        """Handle the OAuth redirect: verify the user, upsert, and log in."""
        log.debug('GHE OAuth callback called')
        # 'state' carries the original destination set in login().
        next_url = request.args.get('state') or url_for('admin.index')
        resp = self.ghe_oauth.authorized_response()
        try:
            if resp is None:
                raise AuthenticationError(
                    'Null response from GHE, denying access.'
                )
            ghe_token = resp['access_token']
            username, email = self.get_ghe_user_profile_info(ghe_token)
            if not self.ghe_team_check(username, ghe_token):
                return redirect(url_for('airflow.noaccess'))
        except AuthenticationError:
            log.exception('')
            return redirect(url_for('airflow.noaccess'))
        user = session.query(models.User).filter(
            models.User.username == username).first()
        if not user:
            user = models.User(
                username=username,
                email=email,
                is_superuser=False)
        session.merge(user)
        session.commit()
        login_user(GHEUser(user))
        session.commit()
        return redirect(next_url)
# Module-level singleton picked up by Airflow's auth loading.
login_manager = GHEAuthBackend()


def login(self, request):
    # NOTE(review): `self` is unused; Airflow's auth contract appears to call
    # this module-level function with an extra leading argument -- keep the
    # placeholder parameter for compatibility.
    return login_manager.login(request)
| |
# -*- coding: utf-8 -*-
"""Declarative module configuration with dynamic value injection
Module Declaration
------------------
Modules declare their configuration via `init`. Here is how `pkdebug`
declares its config params::
cfg = pkconfig.init(
control=(None, re.compile, 'Pattern to match against pkdc messages'),
want_pid_time=(False, bool, 'Display pid and time in messages'),
output=(None, _cfg_output, 'Where to write messages either as a "writable" or file name'),
)
A param tuple contains three values:
0. Default value, in the expected type
1. Callable that can convert a string or other type into a valid value
2. A docstring briefly explaining the configuration element
The returned ``cfg`` object is ready to use after the call. It will contain
the config params as defined or an exception will be raised.
Config Values
-------------
Configuration is returned as nested dicts. The values themselves could
be any Python object. In this case, we have a string and a file object
for the two parameters. We called `os.getcwd` and referred to
`sys.stdout` in param values.
Summary
-------
Here are the steps to configuring an application:
1. When the first module calls `init`, pkconfig gets environment variables
to create a single dict of param values, unparsed.
2. `init` looks for the module's params in the unparsed values.
3. If the parameter is found, that value is used. Else, the default is merged
into the dict and used.
4. The parameter value is then resolved with `str.format`. If the value
is a `list` it will be joined with any previous value (e.g. default).
5. The resolved value is parsed using the param's declared ``parser``.
6. The result is stored in the merged config and also stored in the module's
   `Params` object.
7. Once all params have been parsed for the module, `init` returns the `Params`
object to the module, which can then use those params to initialize itself.
:copyright: Copyright (c) 2015-2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
# Import the minimum number of modules and none from pykern
# pkconfig is the first module imported by all other modules in pykern
import collections
import copy
import importlib
import inspect
import os
import re
import sys
# These modules have very limited imports to avoid loops with config imports
from pykern.pkcollections import PKDict
from pykern import pkconst
from pykern import pkinspect
#: Python version independent value of string instance check
STRING_TYPES = pkconst.STRING_TYPES
#: Environment variable holding channel (defaults to 'dev')
CHANNEL_ATTR = 'pykern_pkconfig_channel'
#: Valid key: starts with a letter, ends with a letter/digit (min. two chars)
KEY_RE = re.compile('^[a-z][a-z0-9_]*[a-z0-9]$', flags=re.IGNORECASE)
#: `parse_tuple` splits strings on this
TUPLE_SEP = ':'
#: Order of channels from least to most stable
VALID_CHANNELS = ('dev', 'alpha', 'beta', 'prod')
#: Channels which can have more verbose output from the server
INTERNAL_TEST_CHANNELS = VALID_CHANNELS[0:2]
#: Configuration for this module: channel (populated lazily by `_coalesce_values`)
cfg = None
#: Channel used when $PYKERN_PKCONFIG_CHANNEL is unset ('dev')
CHANNEL_DEFAULT = VALID_CHANNELS[0]
#: Attribute to detect parser which can parse None (see `parse_none`)
_PARSE_NONE_ATTR = 'pykern_pkconfig_parse_none'
#: Value to add to os.environ (see `reset_state_for_testing`)
_add_to_environ = None
#: Cache of all values in environ and add_to_environ (see `_coalesce_values`)
_raw_values = None
#: All values parsed via init()
_parsed_values = None
#: Regex used by `parse_seconds`: optional "Nd" then optional [[H:]M:]S
_PARSE_SECONDS = re.compile(
    r'^(?:(\d+)d)?(?:(?:(?:(\d+):)?(\d+):)?(\d+))?$',
    flags=re.IGNORECASE,
)
#: Regex used by `parse_bytes`
#: NOTE(review): accepts a 'p' qualifier that `_PARSE_BYTES_MULTIPLIER`
#: below does not define -- confirm intended behavior for petabytes
_PARSE_BYTES = re.compile(r'^(\d+)([kmgtp]?)b?$', flags=re.IGNORECASE)
#: multiplier used for qualifier on `parse_bytes`
_PARSE_BYTES_MULTIPLIER = PKDict(
    k=1024,
    m=1024**2,
    g=1024**3,
    t=1024**4,
)
class ReplacedBy(tuple, object):
    """Declaration for a parameter that was renamed or moved.

    Any attempt to configure the old name raises an error directing the
    user to the new name. Because the parser does not carry
    `_PARSE_NONE_ATTR`, the error fires only when the old parameter is
    actually set to a non-None value (see `_resolve_value`).

    Example::

        cfg = pkconfig.init(
            gone=pkconfig.ReplacedBy('pykern.pkexample.foo'),
        )

    Args:
        new_name: dotted name of the new config parameter
    """
    @staticmethod
    def __new__(cls, new_name):
        # Render the new name as its environment-variable form, e.g.
        # 'pykern.pkexample.foo' -> '$PYKERN_PKEXAMPLE_FOO'
        msg = 'replaced by name=${}'.format(new_name.upper().replace('.', '_'))
        return super(ReplacedBy, cls).__new__(
            cls,
            # (default, parser, docstring): parser always raises
            (None, lambda x: raise_error(msg), msg),
        )
class Required(tuple, object):
    """Container for a required parameter declaration.

    A `Required` declaration has no default (it is `None` internally);
    `_resolve_value` asserts that a value was explicitly configured.

    Example::

        cfg = pkconfig.init(
            any_param=(1, int, 'A parameter with a default'),
            needed=pkconfig.Required(int, 'A parameter that must be set'),
        )

    Args:
        converter (callable): how to convert a string to the internal value
        docstring (str): description of parameter
    """
    @staticmethod
    def __new__(cls, *args):
        assert len(args) == 2, \
            '{}: len(args)!=2'.format(args)
        # Prepend the (absent) default so the result is a normal 3-tuple;
        # _Declaration detects required-ness via isinstance(value, Required)
        return super(Required, cls).__new__(cls, (None,) + args)
class RequiredUnlessDev(tuple, object):
    """Container for a parameter that is required except on the dev channel.

    On the dev channel this evaluates to a plain 3-tuple declaration with
    ``dev_default``; on any other channel it evaluates to a `Required`.

    Example::

        cfg = pkconfig.init(
            maybe_needed=pkconfig.RequiredUnlessDev('dev default', str, 'A parameter'),
        )

    Args:
        dev_default (object): value compatible with type, used only in dev
        converter (callable): how to convert a string to the internal value
        docstring (str): description of parameter
    """
    @staticmethod
    def __new__(cls, *args):
        assert len(args) == 3, \
            '{}: len(args)!=3'.format(args)
        if channel_in('dev'):
            # plain (default, parser, docstring) tuple; not required
            return args
        return Required(args[1], args[2])
def append_load_path(load_path):
    """DEPRECATED: retained only for backward compatibility; does nothing.

    Args:
        load_path (object): ignored
    """
    pass
def channel_in(*args, **kwargs):
    """Test the configured channel against the given channels.

    Args:
        args (str): channels to test against (each must be in `VALID_CHANNELS`)
        channel (str): channel to test (default: cfg.channel)
    Returns:
        bool: True if the tested channel is in ``args``
    """
    if not cfg:
        # lazily bootstrap configuration (sets module-global cfg)
        _coalesce_values()
    # Bug fix: kwargs['channel'] raised KeyError when any other keyword was
    # passed without 'channel'; .get() also tolerates channel=None.
    to_test = kwargs.get('channel') or cfg.channel
    assert to_test in VALID_CHANNELS, \
        '{}: invalid channel keyword arg'.format(to_test)
    # validate every requested channel before answering
    for a in args:
        assert a in VALID_CHANNELS, \
            '{}: invalid channel to test'.format(a)
    return to_test in args
def channel_in_internal_test(channel=None):
    """Is the given (or configured) channel an internal test channel?

    Args:
        channel (str): channel to test (default: cfg.channel)
    Returns:
        bool: True if the channel is in `INTERNAL_TEST_CHANNELS` (dev, alpha)
    """
    internal = INTERNAL_TEST_CHANNELS
    return channel_in(*internal, channel=channel)
def init(**kwargs):
    """Declares and initializes config params for the calling module.

    Args:
        kwargs (dict): param name mapped to a ``(default, parser, docstring)``
            tuple or a nested dict of sub-declarations
    Returns:
        PKDict: the caller's params resolved to their configured values;
            ``None`` when called from ``__main__`` (cannot configure)
    """
    if '_caller_module' in kwargs:
        # Internal use only: _values() calls init() to initialize pkconfig.cfg
        m = kwargs['_caller_module']
        del kwargs['_caller_module']
    else:
        if pkinspect.is_caller_main():
            print(
                'pkconfig.init() called from __main__; cannot configure, ignoring',
                file=sys.stderr)
            return None
        m = pkinspect.caller_module()
    # Nest the declarations under the caller's dotted module path so keys
    # become e.g. pykern.pkdebug.control
    mnp = m.__name__.split('.')
    for k in reversed(mnp):
        kwargs = {k: kwargs}
    decls = {}
    _flatten_keys([], kwargs, decls)
    # ensure environ/channel have been coalesced before resolving
    _coalesce_values()
    res = PKDict()
    _iter_decls(decls, res)
    # unwrap the nesting back down to the caller's own params
    for k in mnp:
        res = res[k]
    return res
def flatten_values(base, new):
    """Merge flattened ``new`` values into ``base``.

    Keys are lowercased (`_Key`). List values are prepended to, not
    recursively merged with, an existing list in ``base``.

    Args:
        base (object): dict-like that is already flattened
        new (object): dict-like that will be flattened; its values override
            ``base`` (except for the list-prepend rule above)
    Returns:
        dict: modified ``base``
    """
    new_values = {}
    _flatten_keys([], new, new_values)
    #TODO(robnagler) Verify that a value x_y_z isn't set when x_y
    # exists already as a None. The other way is ok, because it
    # clears the value unless of course it's not a dict
    # then it would be a type collision
    for k in sorted(new_values.keys()):
        n = new_values[k]
        if k in base:
            b = base[k]
            if isinstance(b, list) or isinstance(n, list):
                if b is None or n is None:
                    # None clears/overrides; fall through to plain assignment
                    pass
                elif isinstance(b, list) and isinstance(n, list):
                    # new list first, previous (e.g. default) values appended
                    n.extend(b)
                else:
                    raise_error(
                        '{}: type mismatch between new value ({}) and base ({})'.format(
                            k.msg, n, b),
                    )
        base[k] = n
    return base
def parse_none(func):
    """Decorator marking a parser as able to parse None.

    Without this marker, `_resolve_value` short-circuits a None value and
    never calls the parser.

    Args:
        func (callable): parser function to be decorated
    Returns:
        callable: ``func`` with the marker attribute set
    """
    setattr(func, _PARSE_NONE_ATTR, True)
    return func
@parse_none
def parse_bool(value):
    """Default parser for `bool` types.

    When a declaration's parser is `bool`, it is replaced with this
    routine, which handles strings and None specially. True strings:
    t, true, y, yes, 1. False values: f, false, n, no, 0, '', None.
    Everything else that is not a string goes through `bool`.

    Args:
        value (object): to be parsed
    Returns:
        bool: True or False
    """
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    if not isinstance(value, STRING_TYPES):
        return bool(value)
    lowered = value.lower()
    for truth, words in (
        (True, ('t', 'true', 'y', 'yes', '1')),
        (False, ('f', 'false', 'n', 'no', '0', '')),
    ):
        if lowered in words:
            return truth
    raise_error('unknown boolean value={}'.format(value))
def parse_bytes(value):
    """Parse a byte count given as `int` or as an ``n[KMGTP]B?`` string.

    Args:
        value (object): int or str to be parsed
    Returns:
        int: non-negative number of bytes
    Raises:
        AssertionError: negative value, wrong type, or malformed string
    """
    if isinstance(value, int):
        if value < 0:
            raise_error('bytes may not be negative value={}'.format(value))
        return value
    if not isinstance(value, str):
        raise_error('bytes must be int or str value={}'.format(value))
    m = _PARSE_BYTES.search(value)
    if not m:
        raise_error('bytes must match n[KMGT]B? value={}'.format(value))
    v = int(m.group(1))
    x = m.group(2)
    if x:
        # Bug fix: _PARSE_BYTES accepts k/m/g/t/p but the module-level
        # multiplier dict has no 'p' entry, so e.g. '1PB' raised KeyError.
        # Compute the multiplier positionally: k=1024**1 ... p=1024**5.
        v *= 1024 ** ('kmgtp'.index(x.lower()) + 1)
    return v
def parse_seconds(value):
    """Parse a duration given as `int` seconds or a ``DdH:M:S`` string.

    Args:
        value (object): int or str to be parsed
    Returns:
        int: non-negative number of seconds
    """
    if isinstance(value, int):
        if value < 0:
            raise_error('seconds may not be negative value={}'.format(value))
        return value
    if not isinstance(value, str):
        raise_error('seconds must be int or str value={}'.format(value))
    m = _PARSE_SECONDS.search(value)
    if not m or not any(m.groups()):
        raise_error('seconds must match [Dd][[[H:]M:]S] value={}'.format(value))
    # groups are (days, hours, minutes, seconds); absent groups are None
    total = 0
    for multiplier, digits in zip((86400, 3600, 60, 1), m.groups()):
        if digits is not None:
            total += multiplier * int(digits)
    return total

#: deprecated alias for `parse_seconds`
parse_secs = parse_seconds
@parse_none
def parse_set(value):
    """Default parser for `set` and `frozenset` types.

    Delegates to `parse_tuple` (so strings are split on ':') and
    freezes the result.

    Args:
        value (object): to be parsed
    Returns:
        frozenset: may be an empty set
    """
    items = parse_tuple(value)
    return frozenset(items)
@parse_none
def parse_tuple(value):
    """Default parser for `tuple` types.

    Accepts None (empty tuple), tuples (returned as-is), lists/sets/
    frozensets (converted), and strings (split on `TUPLE_SEP`).

    Args:
        value (object): to be parsed
    Returns:
        tuple: may be an empty tuple
    """
    if value is None:
        return ()
    if isinstance(value, tuple):
        return value
    if isinstance(value, (frozenset, list, set)):
        return tuple(value)
    assert isinstance(value, STRING_TYPES), \
        'unable to convert type={} to tuple; value={}'.format(type(value), value)
    return tuple(value.split(TUPLE_SEP))
def raise_error(msg):
    """Raise an `AssertionError` for a configuration problem.

    Args:
        msg (str): description of the problem
    Raises:
        AssertionError: always
    """
    raise AssertionError(msg)
def reset_state_for_testing(add_to_environ=None):
    """Clear the cached raw values and replace the environ overlay.

    Only used by unit tests. ``add_to_environ`` fully replaces any
    previous overlay; it is deep-copied so later caller mutations have
    no effect.

    Args:
        add_to_environ (dict): values to overlay on os.environ
    """
    global _raw_values, _add_to_environ
    _add_to_environ = copy.deepcopy(add_to_environ)
    _raw_values = None
def to_environ(cfg_keys, values=None, exclude_re=None):
    """Export config (key, values) as a dict suitable for os.environ.

    ``cfg_keys`` is a list of dotted words (``['pykern.pkdebug.control']``).
    Simple globs (``pykern.pkdebug.*``) are supported; each ``*`` matches
    one run of word characters, so ``pykern.*.*`` requires at least two
    underscores in the environment variable name.

    Only environ and add_to_environ config will be considered, not
    default values, which will be assumed to be processed the same
    way in a subprocess using this environ.

    Args:
        cfg_keys (iter): keys to find values for
        values (mapping): configuration to use instead of the actual config
        exclude_re (object): compiled re or str; matching keys are skipped
    Returns:
        PKDict: environ-style keys (upper case) to string values
    """
    c = flatten_values({}, values) if values else _coalesce_values()
    res = PKDict()
    if exclude_re and isinstance(exclude_re, STRING_TYPES):
        exclude_re = re.compile(exclude_re, flags=re.IGNORECASE)
    # Add one (key, value): upper-case the key, stringify the value
    def a(k, v):
        if exclude_re and exclude_re.search(k):
            return
        if not isinstance(v, STRING_TYPES):
            if v is None:
                # empty string round-trips back to None in _clean_environ
                v = ''
            elif isinstance(v, bool):
                v = '1' if v else ''
            elif isinstance(v, (frozenset, list, set, tuple)):
                v = TUPLE_SEP.join(v)
            else:
                v = str(v)
        res[k.upper()] = v
    for k in cfg_keys:
        k = k.lower().replace('.', '_')
        if '*' not in k:
            # exact key: emit only if configured
            if k in c:
                a(k, c[k])
            continue
        r = re.compile(k.replace('*', r'\w+'), flags=re.IGNORECASE)
        for x, v in c.items():
            if r.search(x):
                a(x, v)
    return res
class _Declaration(object):
"""Initialize a single parameter declaration
Args:
name (str): for error output
value (tuple or dict): specification for parameter
Attributes:
default (object): value to be assigned if not explicitly configured
docstring (str): documentation for the parameter
group (Group): None or Group instance
parser (callable): how to parse a configured value
required (bool): the param must be explicitly configured
"""
def __init__(self, value):
if isinstance(value, dict):
self.group = value
self.parser = None
self.default = None
self.docstring = ''
#TODO(robnagler) _group_has_required(value)
self.required = False
return
assert len(value) == 3, \
'{}: declaration must be a 3-tuple'.format(value)
self.default = value[0]
self.parser = value[1]
self.docstring = value[2]
assert callable(self.parser), \
'{}: parser must be a callable: '.format(self.parser, self.docstring)
self.group = None
self.required = isinstance(value, Required)
self._fixup_parser()
def _fixup_parser(self):
if self.parser == bool:
t = (int,)
self.parser = parse_bool
elif self.parser == tuple:
t = (tuple,)
self.parser = parse_tuple
elif self.parser in (set, frozenset):
t = (frozenset, set, tuple)
self.parser = parse_set
else:
return
if self.required:
return
# better error message than what parser might put out
assert isinstance(self.default, t), \
'default={} must be a type={} docstring={}'.format(
self.default,
[str(x) for x in t],
self.docstring,
)
# validate the default
self.default = self.parser(self.default)
class _Key(str, object):
"""Internal representation of a key for a value
The str value is lowercase joined with ``_``. For debugging,
``msg`` is printed (original case, joined on '.'). The parts
are saved for creating nested values.
"""
@staticmethod
def __new__(cls, parts):
self = super(_Key, cls).__new__(cls, '_'.join(parts).lower())
self.parts = parts
self.msg = '.'.join(parts)
return self
def _clean_environ():
    """Copy `os.environ` (plus the test overlay) keeping only valid keys.

    Keys must match `KEY_RE` (drops e.g. bash exported-function names).
    Empty string values become `None`.

    Returns:
        dict: cleaned copy of os.environ merged with `_add_to_environ`
    """
    res = {}
    env = copy.copy(os.environ)
    if _add_to_environ:
        env.update(_add_to_environ)
    #TODO(robnagler) this makes it easier to set debugging, but it's a hack
    # Bug fix: this used to run AFTER the copy loop below, so the
    # synthesized PYKERN_PKDEBUG_CONTROL never reached the returned dict.
    if 'pkdebug' in env and 'PYKERN_PKDEBUG_CONTROL' not in env:
        env['PYKERN_PKDEBUG_CONTROL'] = env['pkdebug']
    for k in env:
        if KEY_RE.search(k):
            res[k] = env[k] if len(env[k]) > 0 else None
    return res
def _coalesce_values():
    """Coalesce environ and add_to_environ; bootstrap `cfg` and channel.

    Idempotent: after the first call the cached dict is returned.

    Returns:
        dict: raw (unparsed) values keyed by `_Key`
    """
    global _raw_values, _parsed_values
    global cfg
    if _raw_values:
        return _raw_values
    values = {}
    env = _clean_environ()
    flatten_values(values, env)
    # validate the channel before caching anything
    channel = values.get(CHANNEL_ATTR, CHANNEL_DEFAULT)
    assert channel in VALID_CHANNELS, \
        '{}: invalid ${}; must be {}'.format(
            channel, CHANNEL_ATTR.upper(), VALID_CHANNELS)
    values[CHANNEL_ATTR] = channel
    _raw_values = values
    _parsed_values = dict(((_Key([k]), v) for k, v in env.items()))
    # pkconfig bootstraps its own cfg through init()
    cfg = init(
        _caller_module=sys.modules[__name__],
        channel=Required(str, 'which (stage) function returns config'),
    )
    return _raw_values
def _flatten_keys(key_parts, values, res):
    """Flatten nested dicts of config values into `_Key`-keyed leaves.

    Args:
        key_parts (list): prefix path; call with ``[]``
        values (dict): nested dicts of config values
        res (dict): result container (call with ``{}``)
    """
    for name, v in values.items():
        k = _Key(key_parts + name.split('.'))
        assert KEY_RE.search(k), \
            '{}: invalid key must match {}'.format(k.msg, KEY_RE)
        assert k not in res, \
            '{}: duplicate key'.format(k.msg)
        if isinstance(v, dict):
            _flatten_keys(k.parts, v, res)
        else:
            # Only store leaves
            res[k] = v
def _iter_decls(decls, res):
    """Iterates decls and resolves values into res.

    Args:
        decls (dict): flattened `_Key` -> declaration (tuple or dict)
        res (PKDict): result configuration for module, nested by key parts
    """
    for k in sorted(decls.keys()):
        #TODO(robnagler) deal with keys with '.' in them (not possible?)
        d = _Declaration(decls[k])
        r = res
        # walk (creating as needed) down to the leaf's parent dict
        for kp in k.parts[:-1]:
            if kp not in r:
                r[kp] = PKDict()
            r = r[kp]
        kp = k.parts[-1]
        if d.group:
            # groups only create the nesting; members resolve separately
            r[kp] = PKDict()
            continue
        r[kp] = _resolver(d)(k, d)
        # also record in the module-global parsed cache
        _parsed_values[k] = r[kp]
def _resolver(decl):
    """Select the resolution strategy for a declaration.

    Args:
        decl (_Declaration): what to resolve
    Returns:
        callable: `_resolve_dict`, `_resolve_list`, or `_resolve_value`
    """
    p = decl.parser
    if p == dict:
        return _resolve_dict
    if p == list:
        return _resolve_list
    return _resolve_value
def _resolve_dict(key, decl):
    """Resolve a dict-typed param by overlaying all raw values under ``key``.

    Args:
        key (_Key): flattened key of the dict param
        decl (_Declaration): declaration holding the default
    Returns:
        PKDict: deep copy of the default overlaid with configured sub-values
    """
    #TODO(robnagler) assert "required"
    res = PKDict(
        copy.deepcopy(decl.default) if decl.default else {})
    assert isinstance(res, dict), \
        '{}: default ({}) must be a dict'.format(key.msg, decl.default)
    key_prefix = key + '_'
    # NOTE(review): descending key order appears intended to control which
    # value wins when flattened keys collide -- confirm before changing
    for k in reversed(sorted(_raw_values.keys())):
        if k != key and not k.startswith(key_prefix):
            continue
        r = res
        if len(k.parts) == 1:
            # os.environ has only one part (no way to split on '.')
            # so we have to assign the key's suffix manually
            ki = k.parts[0][len(key_prefix):]
            #TODO(robnagler) if key exists, preserve case (only for environ)
        else:
            # descend (creating as needed) to the parent of the leaf
            kp = k.parts[len(key.parts):-1]
            for k2 in kp:
                if not k2 in r:
                    r[k2] = PKDict()
                else:
                    assert isinstance(r[k2], dict), \
                        '{}: type collision on existing non-dict ({}={})'.format(
                            k.msg, k2, r[k2])
                r = r[k2]
            ki = k.parts[-1]
        r[ki] = _raw_values[k]
    return res
def _resolve_list(key, decl):
    """Resolve a list-typed param: configured values are prepended to default.

    Args:
        key (_Key): flattened config key
        decl (_Declaration): declaration with default/required
    Returns:
        list: configured + default values; None when explicitly cleared
    """
    #TODO(robnagler) assert required
    res = copy.deepcopy(decl.default) if decl.default else []
    assert isinstance(res, list), \
        '{}: default ({}) must be a list'.format(key.msg, decl.default)
    if key not in _raw_values:
        # Bug fix: the message formatted undefined name `k` (NameError on
        # this path); it now uses key.msg like the sibling resolvers.
        assert not decl.required, \
            '{}: config value missing and is required'.format(key.msg)
        return res
    if not isinstance(_raw_values[key], list):
        if _raw_values[key] is None:
            # explicit None clears the list entirely
            return None
        raise_error(
            '{}: value ({}) must be a list or None'.format(key.msg, _raw_values[key]),
        )
    return _raw_values[key] + res
def _resolve_value(key, decl):
    """Resolve a scalar param: configured raw value or the default, parsed.

    Args:
        key (_Key): flattened config key
        decl (_Declaration): declaration with default/parser/required
    Returns:
        object: parsed value; None short-circuits unless the parser is
            marked with `parse_none`
    """
    if key not in _raw_values:
        assert not decl.required, \
            '{}: config value missing and is required'.format(key.msg)
        res = decl.default
    else:
        res = _raw_values[key]
    #TODO(robnagler) FOO_BAR='' will not be evaluated. It may need to be
    # if None is not a valid option and there is a default
    if res is None and not hasattr(decl.parser, _PARSE_NONE_ATTR):
        return None
    return decl.parser(res)
def _z(msg):
    """Crude debugging aid: write ``msg`` directly to the controlling tty."""
    with open('/dev/tty', 'w') as out:
        out.write('{}\n'.format(str(msg)))
# ============================================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developers.
# =============================================================================
# @file ostap/fitting/tests/test_fitting_models2_2D.py
# Test module for ostap/fitting/models_2d.py
# - It tests various 2D-non-factrorizeable models
# =============================================================================
""" Test module for ostap/fitting/models_2d.py
- It tests various 2D-non-factrorizeable models
"""
# =============================================================================
from __future__ import print_function
# =============================================================================
import ROOT, random
import ostap.fitting.roofit
import ostap.fitting.models as Models
from ostap.core.core import Ostap, std, VE, dsID
from ostap.logger.utils import rooSilent
import ostap.io.zipshelve as DBASE
from ostap.utils.timing import timing
from builtins import range
from ostap.plotting.canvas import use_canvas
from ostap.utils.utils import wait
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
# use a fixed logger name when run as a script, the module name otherwise
if '__main__' == __name__ or '__builtin__' == __name__ :
    logger = getLogger ( 'test_fitting_models2_2D' )
else :
    logger = getLogger ( __name__ )
# =============================================================================
# integer-encoded ROOT version (not used in the code visible here)
root_version = ROOT.ROOT.GetROOT().GetVersionInt()
# =============================================================================
## make simple test masses, both on the same [3, 3.2] window
m_x = ROOT.RooRealVar ( 'mass_x' , 'Some test mass(X)' , 3 , 3.2 )
m_y = ROOT.RooRealVar ( 'mass_y' , 'Some test mass(Y)' , 3 , 3.2 )
## book very simple data set
varset = ROOT.RooArgSet ( m_x , m_y )
dataset = ROOT.RooDataSet ( dsID() , 'Test Data set-1' , varset )
## signal peak: mean 3.100, sigma 0.015
m = VE(3.100,0.015**2)
## true component yields for the toy sample
N_ss = 5000
N_sb = 500
N_bs = 500
N_bb = 1000
random.seed(0)
## fill it : 5000 events Gauss * Gauss
for i in range(0,N_ss) :
    m_x.value = m.gauss()
    m_y.value = m.gauss()
    dataset.add ( varset )
## fill it : 500 events Gauss * const
for i in range(0,N_sb) :
    m_x.value = m.gauss()
    m_y.value = random.uniform ( *m_y.minmax() )
    dataset.add ( varset )
## fill it : 500 events const * Gauss
for i in range(0,N_bs) :
    m_x.value = random.uniform ( *m_x.minmax() )
    m_y.value = m.gauss()
    dataset.add ( varset )
## fill it : 1000 events const * const
for i in range(0,N_bb) :
    m_x.value = random.uniform ( *m_x.minmax() )
    m_y.value = random.uniform ( *m_y.minmax() )
    dataset.add ( varset )
logger.info ( 'Dataset:%s ' % dataset )
## collection of all models built by the tests below
models = set()
# =============================================================================
signal1  = Models.Gauss_pdf ( 'Gx' , xvar = m_x )
signal2  = Models.Gauss_pdf ( 'Gy' , xvar = m_y )
signal2s = signal1.clone ( name = 'GyS' , xvar = m_y )
signal1.mean  = m.value ()
signal1.sigma = m.error ()
## NOTE(review): signal2 is configured here but the visible tests only use
## the clone signal2s -- confirm signal2 is still needed
signal2.mean  = m.value ()
signal2.sigma = m.error ()
# =============================================================================
## gauss as signal, const as background
# =============================================================================
def test_const () :
    """Simplest factorised model: ( Gauss + const ) x ( Gauss + const )."""
    logger = getLogger ('test_const' )
    logger.info ('Simplest (factorized) fit model: ( Gauss + const ) x ( Gauss + const ) ' )
    model = Models.Fit2D (
        signal_x = signal1  ,
        signal_y = signal2s ,
    )
    ## first pass with mass/sigma fixed, then release everything and refit
    with rooSilent() , use_canvas ( 'test_const' ) :
        result , frame = model.fitTo ( dataset )
        for par in ( model.signal_x.sigma , model.signal_y.sigma ,
                     model.signal_x.mean  , model.signal_y.mean  ) :
            par.release()
        result , frame = model.fitTo ( dataset )
        with wait ( 1 ) : model.draw1 ( dataset )
        with wait ( 1 ) : model.draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % result ( model.SB ) [0] )
        logger.info ('B1xS2 : %20s' % result ( model.BS ) [0] )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, second order polynomial as background
# =============================================================================
def test_p2xp2 () :
    """Factorised fit: ( Gauss + poly ) (x) ( Gauss + poly ), no 2D term.

    NOTE(review): the function name suggests 2nd-order backgrounds and the
    file comment says 'second order', but the log message says P1; the
    ``bkg_* = -1`` arguments are passed straight to Models.Fit2D -- confirm
    the intended polynomial degree against the Fit2D documentation.
    """
    logger = getLogger ('test_p2xp2' )
    logger.info ('Simple (factorized) fit model: ( Gauss + P1 ) (x) ( Gauss + P1 ) ' )
    model = Models.Fit2D (
        suffix   = '_2' ,
        signal_x = signal1 ,
        signal_y = signal2s ,
        bkg_1x   = -1 ,
        bkg_1y   = -1 ,
        bkg_2x   = -1 ,
        bkg_2y   = -1 ,
    )
    ## fit with fixed mass and sigma
    with rooSilent() , use_canvas ( 'test_p2xp2') :
        result, frame = model. fitTo ( dataset )
        ## release means/sigmas and refit
        model.signal_x.sigma.release ()
        model.signal_y.sigma.release ()
        model.signal_x.mean .release ()
        model.signal_y.mean .release ()
        result, frame = model. fitTo ( dataset )
        with wait ( 1 ) : model. draw1 ( dataset )
        with wait ( 1 ) : model. draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % result ( model.SB ) [0] )
        logger.info ('B1xS2 : %20s' % result ( model.BS ) [0] )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, 1st order polynomial as background + non-factorizeable BB
# =============================================================================
def test_p1xp1_BB () :
    """( Gauss + P1 ) (x) ( Gauss + P1 ) plus a non-factorised 2D background."""
    logger = getLogger ( 'test_p1xp1_BB' )
    logger.info ('Simplest non-factorized fit model: ( Gauss + P1 ) (x) ( Gauss + P1 ) + BB' )
    model = Models.Fit2D (
        suffix   = '_3' ,
        signal_x = signal1 ,
        signal_y = signal2s ,
        bkg_1x   = -1 ,
        bkg_1y   = -1 ,
        bkg_2D   = Models.PolyPos2D_pdf ( 'P2D' , m_x , m_y , nx = 2 , ny = 2 )
    )
    ## first pass with mass/sigma fixed, then release everything and refit
    with rooSilent() , use_canvas ( 'test_p1xp1_BB') :
        result , frame = model.fitTo ( dataset )
        for par in ( model.signal_x.sigma , model.signal_y.sigma ,
                     model.signal_x.mean  , model.signal_y.mean  ) :
            par.release()
        result , frame = model.fitTo ( dataset )
        with wait ( 1 ) : model.draw1 ( dataset )
        with wait ( 1 ) : model.draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % result ( model.SB ) [0] )
        logger.info ('B1xS2 : %20s' % result ( model.BS ) [0] )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, 1st order polynomial as background
# =============================================================================
def test_p1xp1_BBs () :
    """( Gauss + P1 ) (x) ( Gauss + P1 ) plus a symmetric non-factorised BB.

    NOTE(review): the 2D PDF name 'P2Ds' is also used in test_p1xp1_BBss
    below -- confirm the name clash is harmless when both tests run.
    """
    logger = getLogger ( 'test_p1xp1_BBs' )
    logger.info ('Non-factorized symmetric background fit model: ( Gauss + P1 ) (x) ( Gauss + P1 ) + BBsym' )
    model = Models.Fit2D (
        suffix   = '_4' ,
        signal_x = signal1 ,
        signal_y = signal2s ,
        bkg_1x   = -1 ,
        bkg_1y   = -1 ,
        bkg_2D   = Models.PolyPos2Dsym_pdf ( 'P2Ds' , m_x , m_y , n = 2 )
    )
    ## fit with fixed mass and sigma
    with rooSilent() , use_canvas ( 'test_p1xp1_BBs') :
        result, frame = model. fitTo ( dataset )
        ## release means/sigmas and refit
        model.signal_x.sigma.release ()
        model.signal_y.sigma.release ()
        model.signal_x.mean .release ()
        model.signal_y.mean .release ()
        result, frame = model. fitTo ( dataset )
        with wait ( 1 ) : model. draw1 ( dataset )
        with wait ( 1 ) : model. draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % result ( model.SB ) [0] )
        logger.info ('B1xS2 : %20s' % result ( model.BS ) [0] )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, 1st order polynomial as background
# =============================================================================
def test_p1xp1_BBss () :
    """Symmetrised model (shared SB/BS yield) with symmetric 2D background.

    Bug fix: the model was drawn twice -- once inside the ``wait`` contexts
    and again immediately after; the redundant second pair of draw calls
    is removed.
    NOTE(review): the 2D PDF name 'P2Ds' is also used in test_p1xp1_BBs
    above -- confirm the name clash is harmless when both tests run.
    """
    logger = getLogger ( 'test_p1xp1_BBss' )
    logger.info ('Symmetrised fit model with non-factorized symmetric background: ( Gauss + P1 ) (x) ( Gauss + P1 ) + BBsym' )
    ## one shared yield for the two mixed components SB and BS
    sb = ROOT.RooRealVar('sb','SB',2500 , 0,10000)
    model = Models.Fit2D (
        suffix   = '_5' ,
        signal_x = signal1 ,
        signal_y = signal2s ,
        bkg_1x   = -1 ,
        bkg_2D   = Models.PolyPos2Dsym_pdf ( 'P2Ds' , m_x , m_y , n = 1 ) ,
        sb       = sb ,
        bs       = sb
    )
    ## seed the yields near their true values
    model.SS = N_ss
    model.BB = N_bb
    model.SB = 2500
    ## fit with fixed mass and sigma, then release and refit
    with rooSilent() , use_canvas ( 'test_p1xp1_BBss') :
        result, frame = model. fitTo ( dataset )
        model.signal_x.sigma.release ()
        model.signal_y.sigma.release ()
        model.signal_x.mean .release ()
        model.signal_y.mean .release ()
        result, frame = model. fitTo ( dataset )
        with wait ( 1 ) : model. draw1 ( dataset )
        with wait ( 1 ) : model. draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % result ( model.SB ) [0] )
        logger.info ('B1xS2 : %20s' % result ( model.BS ) [0] )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, 1st order polynomial as background
# =============================================================================
def test_p1xp1_BBsym () :
    """Fully symmetric Fit2DSym model with symmetric non-factorised BB.

    Bug fix: an unused ``sb`` RooRealVar was created and never passed to
    the model; it is removed.
    NOTE(review): unlike the sibling tests, this one runs without
    ``use_canvas`` -- confirm whether that is intentional.
    """
    logger = getLogger ( 'test_p1xp1_BBsym' )
    logger.info ('Symmetric non-factorized fit model: ( Gauss + P1 ) (x) ( Gauss + P1 ) + BBsym' )
    model = Models.Fit2DSym (
        suffix   = '_6' ,
        signal_x = signal1 ,
        signal_y = signal2s ,
        bkg_1x   = -1 ,
        bkg_2D   = Models.PolyPos2Dsym_pdf ( 'P2D5' , m_x , m_y , n = 2 ) ,
    )
    ## fit with fixed mass and sigma, then release and refit
    with rooSilent() :
        result, frame = model. fitTo ( dataset )
        model.signal_x.sigma.release ()
        model.signal_y.sigma.release ()
        model.signal_x.mean .release ()
        model.signal_y.mean .release ()
        result, frame = model. fitTo ( dataset )
        with wait ( 1 ) : model. draw1 ( dataset )
        with wait ( 1 ) : model. draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        ## the symmetric model shares SB/BS, hence the division by two
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % ( result ( model.SB ) [0] /2 ) )
        logger.info ('B1xS2 : %20s' % ( result ( model.BS ) [0] /2 ) )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, expo times 1st order polynomial as background
# =============================================================================
def test_pbxpb_BB () :
    """( Gauss + expo*P1 ) (x) ( Gauss + expo*P1 ) plus (expo*P1)**2 2D term."""
    logger = getLogger ( 'test_pbxpb_BB' )
    logger.info ('Non-factorizeable background component: ( Gauss + expo*P1 ) (x) ( Gauss + expo*P1 ) + (expo*P1)**2')
    model = Models.Fit2D (
        suffix   = '_7' ,
        signal_x = signal1 ,
        signal_y = signal2s ,
        bkg_1x   = 1 ,
        bkg_1y   = 1 ,
        bkg_2D   = Models.ExpoPol2D_pdf ( 'P2D7' , m_x , m_y , nx = 1 , ny = 1 )
    )
    ## fix the exponential slopes at zero so the expo factors are constant
    model.bkg_1x .tau .fix ( 0 )
    model.bkg_1y .tau .fix ( 0 )
    ## fit with fixed mass and sigma
    with rooSilent() , use_canvas ( 'test_pbxpb_BB') :
        result, frame = model. fitTo ( dataset )
        ## release means/sigmas and refit
        model.signal_x.sigma.release ()
        model.signal_y.sigma.release ()
        model.signal_x.mean .release ()
        model.signal_y.mean .release ()
        result, frame = model. fitTo ( dataset )
        with wait ( 1 ) : model. draw1 ( dataset )
        with wait ( 1 ) : model. draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % result ( model.SB ) [0] )
        logger.info ('B1xS2 : %20s' % result ( model.BS ) [0] )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, expo times 1st order polynomial as background
# =============================================================================
def test_pbxpb_BBs () :
    """( Gauss + expo*P1 )^2 model with a symmetric (expo*P1)**2 2D term.

    Bug fix: the model was drawn twice -- once inside the ``wait`` contexts
    and again immediately after; the redundant second pair of draw calls
    is removed.
    """
    logger = getLogger ( 'test_pbxpb_BBs' )
    logger.info ('Non-factorizeable background component: ( Gauss + expo*P1 ) (x) ( Gauss + expo*P1 ) + Sym(expo*P1)**2')
    model = Models.Fit2D (
        suffix   = '_8' ,
        signal_x = signal1 ,
        signal_y = signal2s ,
        bkg_1x   = 1 ,
        bkg_1y   = 1 ,
        bkg_2D   = Models.ExpoPol2Dsym_pdf ( 'P2D8' , m_x , m_y , n = 1 )
    )
    ## fix the exponential slopes at zero so the expo factors are constant
    model.bkg_1x .tau .fix ( 0 )
    model.bkg_1y .tau .fix ( 0 )
    ## fit with fixed mass and sigma, then release and refit
    with rooSilent() , use_canvas ('test_pbxpb_BBs' ) :
        result, frame = model. fitTo ( dataset )
        model.signal_x.sigma.release ()
        model.signal_y.sigma.release ()
        model.signal_x.mean .release ()
        model.signal_y.mean .release ()
        result, frame = model. fitTo ( dataset )
        with wait ( 1 ) : model. draw1 ( dataset )
        with wait ( 1 ) : model. draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % result ( model.SB ) [0] )
        logger.info ('B1xS2 : %20s' % result ( model.BS ) [0] )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## gauss as signal, expo times 1st order polynomial as background
# =============================================================================
def test_pbxpb_BBsym():
    """Symmetric Fit2DSym model with a non-factorizeable
    Sym(expo*P1)**2 background component."""
    logger = getLogger('test_pbxpb_BBsym')
    logger.info('Symmetric fit model with non-factorizeable background component: ( Gauss + P1 ) (x) ( Gauss + P1 ) + Sym(expo*P1)**2')
    model = Models.Fit2DSym(
        suffix   = '_9',
        signal_x = signal1,
        signal_y = signal2s,
        bkg_1x   = -1,
        bkg_2D   = Models.ExpoPol2Dsym_pdf('P2D9', m_x, m_y, n = 1)
    )
    # first pass with mass/resolution fixed, then release and refit
    with rooSilent(), use_canvas('test_pbxpb_BBsym'):
        fit_result, fit_frame = model.fitTo(dataset)
        model.signal_x.sigma.release()
        model.signal_y.sigma.release()
        model.signal_x.mean.release()
        model.signal_y.mean.release()
        fit_result, fit_frame = model.fitTo(dataset)
        with wait(1): model.draw1(dataset)
        with wait(1): model.draw2(dataset)
    if 0 != fit_result.status() or 3 != fit_result.covQual():
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % (fit_result.status(), fit_result.covQual()))
        print(fit_result)
    else:
        # cross-terms are shared between the two symmetric halves
        logger.info('S1xS2 : %20s' % fit_result(model.SS)[0])
        logger.info('S1xB2 : %20s' % (fit_result(model.SB)[0] / 2))
        logger.info('B1xS2 : %20s' % (fit_result(model.BS)[0] / 2))
        logger.info('B1xB2 : %20s' % fit_result(model.BB)[0])
    models.add(model)
# =============================================================================
## gauss as signal, expo times 1st order polynomial as background
# =============================================================================
def test_psxps_BBs():
    """Gauss signals plus a non-factorizeable symmetric (PS*P1)**2
    background built on a phase-space function."""
    logger = getLogger('test_psxps_BBs')
    logger.info('Non-factorizeable symmetric background component: ( Gauss + P1 ) (x) ( Gauss + P1 ) + (PS*P1)**2')
    phsp = Ostap.Math.PhaseSpaceNL(1.0, 5.0, 2, 5)
    model = Models.Fit2D(
        suffix   = '_11',
        signal_x = signal1,
        signal_y = signal2s,
        bkg_1x   = -1,
        bkg_1y   = -1,
        bkg_2D   = Models.PSPol2Dsym_pdf('P2D11', m_x, m_y, ps = phsp, n = 1)
    )
    # first pass with mass/resolution fixed, then release and refit
    with rooSilent(), use_canvas('test_psxps_BBs'):
        fit_result, fit_frame = model.fitTo(dataset)
        model.signal_x.sigma.release()
        model.signal_y.sigma.release()
        model.signal_x.mean.release()
        model.signal_y.mean.release()
        fit_result, fit_frame = model.fitTo(dataset)
        with wait(1): model.draw1(dataset)
        with wait(1): model.draw2(dataset)
    if 0 != fit_result.status() or 3 != fit_result.covQual():
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % (fit_result.status(), fit_result.covQual()))
        print(fit_result)
    else:
        logger.info('S1xS2 : %20s' % fit_result(model.SS)[0])
        logger.info('S1xB2 : %20s' % fit_result(model.SB)[0])
        logger.info('B1xS2 : %20s' % fit_result(model.BS)[0])
        logger.info('B1xB2 : %20s' % fit_result(model.BB)[0])
    models.add(model)
# =============================================================================
## gauss as signal, expo times 1st order polynomial as background
# =============================================================================
def test_psxps_BBsym () :
    """Symmetric Fit2DSym model with a non-factorizeable (PS*P1)**2
    background component.

    FIX: corrected the 'Simmetric' typo in the informational log message.
    """
    logger = getLogger ( 'test_psxps_BBsym' )
    logger.info ('Symmetric fit model with non-factorizeable background component: ( Gauss + P1 ) (x) ( Gauss + P1 ) + (PS*P1)**2')
    PS = Ostap.Math.PhaseSpaceNL ( 1.0 , 5.0 , 2 , 5 )
    model = Models.Fit2DSym (
        suffix   = '_12'    ,
        signal_x = signal1  ,
        signal_y = signal2s ,
        bkg_1x   = -1       ,
        bkg_2D   = Models.PSPol2Dsym_pdf ( 'P2D12' , m_x , m_y , ps = PS , n = 1 )
        )
    ## fit with fixed mass and sigma, then release and refit
    with rooSilent() , use_canvas ( 'test_psxps_BBsym' ) :
        result , frame = model.fitTo ( dataset )
        model.signal_x.sigma.release ()
        model.signal_y.sigma.release ()
        model.signal_x.mean .release ()
        model.signal_y.mean .release ()
        result , frame = model.fitTo ( dataset )
        with wait ( 1 ) : model.draw1 ( dataset )
        with wait ( 1 ) : model.draw2 ( dataset )
    if 0 != result.status() or 3 != result.covQual() :
        logger.warning('Fit is not perfect MIGRAD=%d QUAL=%d '
                       % ( result.status() , result.covQual() ) )
        print(result)
    else :
        logger.info ('S1xS2 : %20s' % result ( model.SS ) [0] )
        logger.info ('S1xB2 : %20s' % ( result ( model.SB ) [0] /2 ) )
        logger.info ('B1xS2 : %20s' % ( result ( model.BS ) [0] /2 ) )
        logger.info ('B1xB2 : %20s' % result ( model.BB ) [0] )
    models.add ( model )
# =============================================================================
## check that everything is serializable
# =============================================================================
def test_db():
    """Check that every fit ingredient can be serialized into a DBASE."""
    with timing('Save everything to DBASE', logger), DBASE.tmpdb() as db:
        db['m_x'] = m_x
        db['m_y'] = m_y
        db['vars'] = varset
        # store each model individually, then the whole collection
        for m in models:
            db['model:' + m.name] = m
        db['models'] = models
        db['dataset'] = dataset
        db.ls()
# =============================================================================
if '__main__' == __name__ :
    from ostap.utils.timing import timing
    # run every test, timing each one
    with timing ('test_const'       , logger ) : test_const       ()
    with timing ('test_p2xp2'       , logger ) : test_p2xp2       ()
    with timing ('test_p1xp1_BB'    , logger ) : test_p1xp1_BB    ()
    with timing ('test_p1xp1_BBss'  , logger ) : test_p1xp1_BBss  ()
    with timing ('test_p1xp1_BBsym' , logger ) : test_p1xp1_BBsym ()
    with timing ('test_pbxpb_BB'    , logger ) : test_pbxpb_BB    ()
    with timing ('test_pbxpb_BBs'   , logger ) : test_pbxpb_BBs   ()
    with timing ('test_pbxpb_BBsym' , logger ) : test_pbxpb_BBsym ()
    with timing ('test_psxps_BBs'   , logger ) : test_psxps_BBs   ()
    with timing ('test_psxps_BBsym' , logger ) : test_psxps_BBsym ()
    # FIX: pass the logger here too, consistent with all other calls
    with timing ('Save to DB'       , logger ) : test_db          ()
# =============================================================================
## The END
# =============================================================================
| |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import collections
import copy
import functools
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
import six
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import model as network_model
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# VIF (network adapter) device classes this module knows how to create;
# convert_vif_model() validates requested models against this list.
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
                                 'VirtualPCNet32', 'VirtualSriovEthernetCard',
                                 'VirtualVmxnet', 'VirtualVmxnet3']
# A simple cache for storing inventory folder references.
# Format: {inventory_path: folder_ref}
_FOLDER_PATH_REF_MAPPING = {}
# A cache for VM references. The key will be the VM name
# and the value is the VM reference. The VM name is unique. This
# is either the UUID of the instance or UUID-rescue in the case
# that this is a rescue VM. This is in order to prevent
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
class Limits(object):
    """Holds the limit/reservation/shares settings of a single instance
    resource (CPU, memory, disk I/O or vif bandwidth)."""

    def __init__(self, limit=None, reservation=None,
                 shares_level=None, shares_share=None):
        """Limits object holds instance limits for convenience."""
        self.limit = limit
        self.reservation = reservation
        self.shares_level = shares_level
        self.shares_share = shares_share

    def validate(self):
        """Raise InvalidInput when the shares configuration is inconsistent."""
        level = self.shares_level
        if level in ('high', 'normal', 'low'):
            # a custom share count makes no sense for a predefined level
            if self.shares_share:
                reason = _("Share level '%s' cannot have share "
                           "configured") % level
                raise exception.InvalidInput(reason=reason)
            return
        if level == 'custom':
            return
        if level:
            reason = _("Share '%s' is not supported") % level
            raise exception.InvalidInput(reason=reason)

    def has_limits(self):
        """Return True when any limit-related setting is present."""
        return bool(self.limit or self.reservation or self.shares_level)
class ExtraSpecs(object):
    """Container for the flavor extra_specs consumed by the VMware driver."""

    def __init__(self, cpu_limits=None, hw_version=None,
                 storage_policy=None, cores_per_socket=None,
                 memory_limits=None, disk_io_limits=None,
                 vif_limits=None):
        """ExtraSpecs object holds extra_specs for the instance."""
        # every *_limits attribute always ends up a Limits instance
        for attr, value in (('cpu_limits', cpu_limits),
                            ('memory_limits', memory_limits),
                            ('disk_io_limits', disk_io_limits),
                            ('vif_limits', vif_limits)):
            setattr(self, attr, value or Limits())
        self.hw_version = hw_version
        self.storage_policy = storage_policy
        self.cores_per_socket = cores_per_socket
def vm_refs_cache_reset():
    """Drop all cached VM references by rebinding the module-level cache."""
    global _VM_REFS_CACHE
    _VM_REFS_CACHE = dict()
def vm_ref_cache_delete(id):
    """Remove the cached reference for *id*; a no-op when absent."""
    try:
        del _VM_REFS_CACHE[id]
    except KeyError:
        pass
def vm_ref_cache_update(id, vm_ref):
    """Store (or overwrite) the cached reference for *id*."""
    _VM_REFS_CACHE.update({id: vm_ref})
def vm_ref_cache_get(id):
    """Return the cached reference for *id*, or None on a miss."""
    return _VM_REFS_CACHE.get(id, None)
def _vm_ref_cache(id, func, session, data):
    """Return the cached ref for *id*, computing and caching it on a miss."""
    cached = vm_ref_cache_get(id)
    if cached:
        return cached
    fresh = func(session, data)
    vm_ref_cache_update(id, fresh)
    return fresh
def vm_ref_cache_from_instance(func):
    """Decorator caching (session, instance) lookups keyed by instance.uuid."""
    @functools.wraps(func)
    def wrapper(session, instance):
        return _vm_ref_cache(instance.uuid, func, session, instance)
    return wrapper
def vm_ref_cache_from_name(func):
    """Decorator caching (session, name) lookups keyed by the VM name."""
    @functools.wraps(func)
    def wrapper(session, name):
        return _vm_ref_cache(name, func, session, name)
    return wrapper
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
# Lightweight record describing a VMDK attached to a VM; produced by
# get_vmdk_info().  'device' carries the raw VirtualDisk object.
VmdkInfo = collections.namedtuple('VmdkInfo', ['path', 'adapter_type',
                                               'disk_type',
                                               'capacity_in_bytes',
                                               'device'])
def _iface_id_option_value(client_factory, iface_id, port_index):
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % port_index
opt.value = iface_id
return opt
def _get_allocation_info(client_factory, limits, allocation_type):
allocation = client_factory.create(allocation_type)
if limits.limit:
allocation.limit = limits.limit
else:
# Set as 'unlimited'
allocation.limit = -1
if limits.reservation:
allocation.reservation = limits.reservation
else:
allocation.reservation = 0
shares = client_factory.create('ns0:SharesInfo')
if limits.shares_level:
shares.level = limits.shares_level
if (shares.level == 'custom' and
limits.shares_share):
shares.shares = limits.shares_share
else:
shares.shares = 0
else:
shares.level = 'normal'
shares.shares = 0
# The VirtualEthernetCardResourceAllocation has 'share' instead of
# 'shares'.
if hasattr(allocation, 'share'):
allocation.share = shares
else:
allocation.shares = shares
return allocation
def get_vm_create_spec(client_factory, instance, data_store_name,
                       vif_infos, extra_specs,
                       os_type=constants.DEFAULT_OS_TYPE,
                       profile_spec=None, metadata=None):
    """Builds the VM Create spec.

    :param client_factory: SOAP client factory used to create spec objects
    :param instance: nova instance; its uuid, vcpus and memory_mb are used
    :param data_store_name: datastore that will hold the VM files
    :param vif_infos: list of vif dicts ('vif_model', 'iface_id', ...)
    :param extra_specs: ExtraSpecs holding cpu/memory/vif limits etc.
    :param os_type: vSphere guest id string
    :param profile_spec: optional storage-profile spec to attach
    :param metadata: optional annotation text for the VM
    :returns: populated ns0:VirtualMachineConfigSpec
    """
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.name = instance.uuid
    config_spec.guestId = os_type
    # The name is the unique identifier for the VM.
    config_spec.instanceUuid = instance.uuid
    if metadata:
        config_spec.annotation = metadata
    # set the Hardware version
    config_spec.version = extra_specs.hw_version
    # Allow nested hypervisor instances to host 64 bit VMs.
    if os_type in ("vmkernel5Guest", "vmkernel6Guest", "vmkernel65Guest",
                   "windowsHyperVGuest"):
        # NOTE(review): value is the string "True", not a boolean --
        # presumably what the SOAP layer expects here; confirm before changing.
        config_spec.nestedHVEnabled = "True"
    # Append the profile spec
    if profile_spec:
        config_spec.vmProfile = [profile_spec]
    vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
    vm_file_info.vmPathName = "[" + data_store_name + "]"
    config_spec.files = vm_file_info
    # run VMware-tools hooks around power transitions
    tools_info = client_factory.create('ns0:ToolsConfigInfo')
    tools_info.afterPowerOn = True
    tools_info.afterResume = True
    tools_info.beforeGuestStandby = True
    tools_info.beforeGuestShutdown = True
    tools_info.beforeGuestReboot = True
    config_spec.tools = tools_info
    config_spec.numCPUs = int(instance.vcpus)
    if extra_specs.cores_per_socket:
        config_spec.numCoresPerSocket = int(extra_specs.cores_per_socket)
    config_spec.memoryMB = int(instance.memory_mb)
    # Configure cpu information
    if extra_specs.cpu_limits.has_limits():
        config_spec.cpuAllocation = _get_allocation_info(
            client_factory, extra_specs.cpu_limits,
            'ns0:ResourceAllocationInfo')
    # Configure memory information
    if extra_specs.memory_limits.has_limits():
        config_spec.memoryAllocation = _get_allocation_info(
            client_factory, extra_specs.memory_limits,
            'ns0:ResourceAllocationInfo')
    # one network device per vif, plus an optional serial port
    devices = []
    for vif_info in vif_infos:
        vif_spec = _create_vif_spec(client_factory, vif_info,
                                    extra_specs.vif_limits)
        devices.append(vif_spec)
    serial_port_spec = create_serial_port_spec(client_factory)
    if serial_port_spec:
        devices.append(serial_port_spec)
    config_spec.deviceChange = devices
    # add vm-uuid and iface-id.x values for Neutron
    extra_config = []
    opt = client_factory.create('ns0:OptionValue')
    opt.key = "nvp.vm-uuid"
    opt.value = instance.uuid
    extra_config.append(opt)
    # enable to provide info needed by udev to generate /dev/disk/by-id
    opt = client_factory.create('ns0:OptionValue')
    opt.key = "disk.EnableUUID"
    opt.value = True
    extra_config.append(opt)
    # port_index only advances for vifs that actually carry an iface_id
    port_index = 0
    for vif_info in vif_infos:
        if vif_info['iface_id']:
            extra_config.append(_iface_id_option_value(client_factory,
                                                       vif_info['iface_id'],
                                                       port_index))
            port_index += 1
    if (CONF.vmware.console_delay_seconds and
            CONF.vmware.console_delay_seconds > 0):
        # configured in seconds; the vSphere option expects microseconds
        opt = client_factory.create('ns0:OptionValue')
        opt.key = 'keyboard.typematicMinDelay'
        opt.value = CONF.vmware.console_delay_seconds * 1000000
        extra_config.append(opt)
    config_spec.extraConfig = extra_config
    # Set the VM to be 'managed' by 'OpenStack'
    managed_by = client_factory.create('ns0:ManagedByInfo')
    managed_by.extensionKey = constants.EXTENSION_KEY
    managed_by.type = constants.EXTENSION_TYPE_INSTANCE
    config_spec.managedBy = managed_by
    return config_spec
def create_serial_port_spec(client_factory):
    """Creates config spec for serial port.

    Returns None when no serial_port_service_uri is configured.
    """
    service_uri = CONF.vmware.serial_port_service_uri
    if not service_uri:
        return
    backing = client_factory.create('ns0:VirtualSerialPortURIBackingInfo')
    backing.direction = "server"
    backing.serviceURI = service_uri
    backing.proxyURI = CONF.vmware.serial_port_proxy_uri
    connect_info = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connect_info.startConnected = True
    connect_info.allowGuestControl = True
    connect_info.connected = True
    serial_port = client_factory.create('ns0:VirtualSerialPort')
    serial_port.connectable = connect_info
    serial_port.backing = backing
    # we are using unique negative integers as temporary keys
    serial_port.key = -2
    serial_port.yieldOnPoll = True
    dev_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    dev_spec.operation = "add"
    dev_spec.device = serial_port
    return dev_spec
def get_vm_boot_spec(client_factory, device):
    """Returns updated boot settings for the instance.

    The boot order for the instance will be changed to have the
    input device as the boot disk.
    """
    boot_disk = client_factory.create(
        'ns0:VirtualMachineBootOptionsBootableDiskDevice')
    boot_disk.deviceKey = device.key
    boot_options = client_factory.create('ns0:VirtualMachineBootOptions')
    boot_options.bootOrder = [boot_disk]
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.bootOptions = boot_options
    return config_spec
def get_vm_resize_spec(client_factory, vcpus, memory_mb, extra_specs,
                       metadata=None):
    """Provides updates for a VM spec."""
    spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    spec.numCPUs = vcpus
    spec.memoryMB = memory_mb
    # always refresh the CPU allocation from the (possibly new) limits
    spec.cpuAllocation = _get_allocation_info(
        client_factory, extra_specs.cpu_limits, 'ns0:ResourceAllocationInfo')
    if metadata:
        spec.annotation = metadata
    return spec
def create_controller_spec(client_factory, key,
                           adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                           bus_number=0):
    """Builds a Config Spec for the LSI or Bus Logic Controller's addition
    which acts as the controller for the virtual hard disk to be attached
    to the VM.
    """
    # map adapter type to its SOAP controller class; anything unknown
    # falls back to the plain LSI Logic controller
    controller_types = {
        constants.ADAPTER_TYPE_BUSLOGIC: 'ns0:VirtualBusLogicController',
        constants.ADAPTER_TYPE_LSILOGICSAS:
            'ns0:VirtualLsiLogicSASController',
        constants.ADAPTER_TYPE_PARAVIRTUAL: 'ns0:ParaVirtualSCSIController',
    }
    controller_class = controller_types.get(adapter_type,
                                            'ns0:VirtualLsiLogicController')
    virtual_controller = client_factory.create(controller_class)
    virtual_controller.key = key
    virtual_controller.busNumber = bus_number
    virtual_controller.sharedBus = "noSharing"
    virtual_device_config = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    virtual_device_config.operation = "add"
    virtual_device_config.device = virtual_controller
    return virtual_device_config
def convert_vif_model(name):
    """Converts standard VIF_MODEL types to the internal VMware ones."""
    translations = {
        network_model.VIF_MODEL_E1000: 'VirtualE1000',
        network_model.VIF_MODEL_E1000E: 'VirtualE1000e',
        network_model.VIF_MODEL_PCNET: 'VirtualPCNet32',
        network_model.VIF_MODEL_SRIOV: 'VirtualSriovEthernetCard',
        network_model.VIF_MODEL_VMXNET: 'VirtualVmxnet',
        network_model.VIF_MODEL_VMXNET3: 'VirtualVmxnet3',
    }
    if name in translations:
        return translations[name]
    # already an internal device name? pass it through unchanged
    if name not in ALL_SUPPORTED_NETWORK_DEVICES:
        msg = _('%s is not supported.') % name
        raise exception.Invalid(msg)
    return name
def _create_vif_spec(client_factory, vif_info, vif_limits=None):
    """Builds a config spec for the addition of a new network
    adapter to the VM.

    NOTE: mutates *vif_info* in place -- 'vif_model' is normalized via
    convert_vif_model().

    :param client_factory: SOAP client factory
    :param vif_info: dict with 'vif_model', 'network_ref', 'network_name',
        'mac_address' and 'iface_id' entries
    :param vif_limits: optional Limits for vnic bandwidth
    :raises exception.Invalid: when limits are requested but the device
        does not expose resourceAllocation (pre-6.0 vCenter)
    """
    network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    network_spec.operation = "add"
    # Keep compatible with other Hyper vif model parameter.
    vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
    vif = 'ns0:' + vif_info['vif_model']
    net_device = client_factory.create(vif)
    # NOTE(asomya): Only works on ESXi if the portgroup binding is set to
    # ephemeral. Invalid configuration if set to static and the NIC does
    # not come up on boot if set to dynamic.
    network_ref = vif_info['network_ref']
    network_name = vif_info['network_name']
    mac_address = vif_info['mac_address']
    backing = None
    # pick the backing object matching the kind of network we attach to
    if network_ref and network_ref['type'] == 'OpaqueNetwork':
        backing = client_factory.create(
            'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo')
        backing.opaqueNetworkId = network_ref['network-id']
        backing.opaqueNetworkType = network_ref['network-type']
        # Configure externalId
        if network_ref['use-external-id']:
            # externalId is only supported from vCenter 6.0 onwards
            if hasattr(net_device, 'externalId'):
                net_device.externalId = vif_info['iface_id']
            else:
                # older bindings: carry the id as a dynamic property instead
                dp = client_factory.create('ns0:DynamicProperty')
                dp.name = "__externalId__"
                dp.val = vif_info['iface_id']
                net_device.dynamicProperty = [dp]
    elif (network_ref and
          network_ref['type'] == "DistributedVirtualPortgroup"):
        backing = client_factory.create(
            'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo')
        portgroup = client_factory.create(
            'ns0:DistributedVirtualSwitchPortConnection')
        portgroup.switchUuid = network_ref['dvsw']
        portgroup.portgroupKey = network_ref['dvpg']
        if 'dvs_port_key' in network_ref:
            portgroup.portKey = network_ref['dvs_port_key']
        backing.port = portgroup
    else:
        # plain network referenced by name
        backing = client_factory.create(
            'ns0:VirtualEthernetCardNetworkBackingInfo')
        backing.deviceName = network_name
    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = True
    connectable_spec.connected = True
    net_device.connectable = connectable_spec
    net_device.backing = backing
    # The Server assigns a Key to the device. Here we pass a -ve temporary key.
    # -ve because actual keys are +ve numbers and we don't
    # want a clash with the key that server might associate with the device
    net_device.key = -47
    net_device.addressType = "manual"
    net_device.macAddress = mac_address
    net_device.wakeOnLanEnabled = True
    # vnic limits are only supported from version 6.0
    if vif_limits and vif_limits.has_limits():
        if hasattr(net_device, 'resourceAllocation'):
            net_device.resourceAllocation = _get_allocation_info(
                client_factory, vif_limits,
                'ns0:VirtualEthernetCardResourceAllocation')
        else:
            msg = _('Limits only supported from vCenter 6.0 and above')
            raise exception.Invalid(msg)
    network_spec.device = net_device
    return network_spec
def get_network_attach_config_spec(client_factory, vif_info, index,
                                   vif_limits=None):
    """Builds the vif attach config spec."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.deviceChange = [_create_vif_spec(client_factory, vif_info,
                                                 vif_limits)]
    iface_id = vif_info['iface_id']
    if iface_id is not None:
        # record the neutron iface-id for this port index
        config_spec.extraConfig = [_iface_id_option_value(client_factory,
                                                          iface_id, index)]
    return config_spec
def get_network_detach_config_spec(client_factory, device, port_index):
    """Builds the vif detach config spec."""
    device_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    device_spec.operation = "remove"
    device_spec.device = device
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.deviceChange = [device_spec]
    # If a key is already present then it cannot be deleted, only updated.
    # This enables us to reuse this key if there is an additional
    # attachment. The keys need to be preserved. This is due to the fact
    # that there is logic on the ESX that does the network wiring
    # according to these values. If they are changed then this will
    # break networking to and from the interface.
    config_spec.extraConfig = [_iface_id_option_value(client_factory,
                                                      'free', port_index)]
    return config_spec
def get_storage_profile_spec(session, storage_policy):
    """Gets the vm profile spec configured for storage policy.

    Returns None when no profile matches the given policy name.
    """
    profile_id = pbm.get_profile_id_by_name(session, storage_policy)
    if not profile_id:
        return None
    factory = session.vim.client.factory
    profile_spec = factory.create('ns0:VirtualMachineDefinedProfileSpec')
    profile_spec.profileId = profile_id.uniqueId
    return profile_spec
def get_vmdk_attach_config_spec(client_factory,
                                disk_type=constants.DEFAULT_DISK_TYPE,
                                file_path=None,
                                disk_size=None,
                                linked_clone=False,
                                controller_key=None,
                                unit_number=None,
                                device_name=None,
                                disk_io_limits=None):
    """Builds the vmdk attach config spec."""
    disk_spec = _create_virtual_disk_spec(client_factory, controller_key,
                                          disk_type, file_path, disk_size,
                                          linked_clone, unit_number,
                                          device_name, disk_io_limits)
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.deviceChange = [disk_spec]
    return config_spec
def get_cdrom_attach_config_spec(client_factory,
                                 datastore,
                                 file_path,
                                 controller_key,
                                 cdrom_unit_number):
    """Builds and returns the cdrom attach config spec."""
    cdrom_spec = create_virtual_cdrom_spec(client_factory,
                                           datastore,
                                           controller_key,
                                           file_path,
                                           cdrom_unit_number)
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.deviceChange = [cdrom_spec]
    return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
                                destroy_disk=False):
    """Builds the vmdk detach config spec."""
    detach_spec = detach_virtual_disk_spec(client_factory,
                                           device,
                                           destroy_disk)
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.deviceChange = [detach_spec]
    return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
    """Builds extra spec fields from a dictionary.

    :param client_factory: SOAP client factory
    :param extra_opts: dict of extraConfig key/value pairs
    :returns: ns0:VirtualMachineConfigSpec with extraConfig populated
    """
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    # add the key value pairs
    # NOTE: plain dict.items() works on both py2 and py3; the six helper
    # previously used here was unnecessary.
    extra_config = []
    for key, value in extra_opts.items():
        opt = client_factory.create('ns0:OptionValue')
        opt.key = key
        opt.value = value
        extra_config.append(opt)
    config_spec.extraConfig = extra_config
    return config_spec
def _get_device_capacity(device):
# Devices pre-vSphere-5.5 only reports capacityInKB, which has
# rounding inaccuracies. Use that only if the more accurate
# attribute is absent.
if hasattr(device, 'capacityInBytes'):
return device.capacityInBytes
else:
return device.capacityInKB * units.Ki
def _get_device_disk_type(device):
    """Classify a VirtualDisk backing: thin, eager-zeroed-thick or default."""
    backing = device.backing
    if getattr(backing, 'thinProvisioned', False):
        return constants.DISK_TYPE_THIN
    if getattr(backing, 'eagerlyScrub', False):
        return constants.DISK_TYPE_EAGER_ZEROED_THICK
    return constants.DEFAULT_DISK_TYPE
def get_vmdk_info(session, vm_ref, uuid=None):
    """Returns information for the primary VMDK attached to the given VM.

    :param session: vCenter session used to read the device list
    :param vm_ref: managed object reference of the VM
    :param uuid: when given, only the flat-backed disk named '<uuid>.vmdk'
        counts as the root disk; otherwise the last flat-backed disk seen
        is reported
    :returns: a VmdkInfo namedtuple (fields are None/0 when no matching
        disk is found)
    """
    hardware_devices = session._call_method(vutil,
                                            "get_object_property",
                                            vm_ref,
                                            "config.hardware.device")
    # unwrap the ArrayOfVirtualDevice container some bindings return
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    vmdk_file_path = None
    vmdk_controller_key = None
    disk_type = None
    capacity_in_bytes = 0
    # Determine if we need to get the details of the root disk
    root_disk = None
    root_device = None
    if uuid:
        root_disk = '%s.vmdk' % uuid
    vmdk_device = None
    # controller key -> adapter-type name, filled while scanning devices
    adapter_type_dict = {}
    for device in hardware_devices:
        if device.__class__.__name__ == "VirtualDisk":
            if device.backing.__class__.__name__ == \
                    "VirtualDiskFlatVer2BackingInfo":
                path = ds_obj.DatastorePath.parse(device.backing.fileName)
                if root_disk and path.basename == root_disk:
                    root_device = device
                vmdk_device = device
        elif device.__class__.__name__ == "VirtualLsiLogicController":
            adapter_type_dict[device.key] = constants.DEFAULT_ADAPTER_TYPE
        elif device.__class__.__name__ == "VirtualBusLogicController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_BUSLOGIC
        elif device.__class__.__name__ == "VirtualIDEController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_IDE
        elif device.__class__.__name__ == "VirtualLsiLogicSASController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_LSILOGICSAS
        elif device.__class__.__name__ == "ParaVirtualSCSIController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_PARAVIRTUAL
    if root_disk:
        # a specific root disk was requested: report it (or nothing at all),
        # even when other flat disks were seen
        vmdk_device = root_device
    if vmdk_device:
        vmdk_file_path = vmdk_device.backing.fileName
        capacity_in_bytes = _get_device_capacity(vmdk_device)
        vmdk_controller_key = vmdk_device.controllerKey
        disk_type = _get_device_disk_type(vmdk_device)
    adapter_type = adapter_type_dict.get(vmdk_controller_key)
    return VmdkInfo(vmdk_file_path, adapter_type, disk_type,
                    capacity_in_bytes, vmdk_device)
# SCSI controller SOAP class name -> nova adapter-type constant; used by
# get_scsi_adapter_type() to select an adapter from existing hardware.
scsi_controller_classes = {
    'ParaVirtualSCSIController': constants.ADAPTER_TYPE_PARAVIRTUAL,
    'VirtualLsiLogicController': constants.DEFAULT_ADAPTER_TYPE,
    'VirtualLsiLogicSASController': constants.ADAPTER_TYPE_LSILOGICSAS,
    'VirtualBusLogicController': constants.ADAPTER_TYPE_BUSLOGIC,
}
def get_scsi_adapter_type(hardware_devices):
    """Selects a proper iscsi adapter type from the existing
    hardware devices
    """
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for device in hardware_devices:
        class_name = device.__class__.__name__
        if class_name not in scsi_controller_classes:
            continue
        # find the controllers which still have available slots
        if len(device.device) < constants.SCSI_MAX_CONNECT_NUMBER:
            # return the first match one
            return scsi_controller_classes[class_name]
    raise exception.StorageError(
        reason=_("Unable to find iSCSI Target"))
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if unit_number not in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController',
'ParaVirtualSCSIController']
def _find_allocated_slots(devices):
    """Return dictionary which maps controller_key to list of allocated unit
    numbers for that controller_key.
    """
    taken = {}
    for device in devices:
        occupies_slot = (hasattr(device, 'controllerKey') and
                         hasattr(device, 'unitNumber'))
        if occupies_slot:
            taken.setdefault(device.controllerKey, []).append(
                device.unitNumber)
        if _is_scsi_controller(device):
            # the SCSI controller sits on its own bus
            taken.setdefault(device.key, []).append(
                device.scsiCtlrUnitNumber)
    return taken
def _get_bus_number_for_scsi_controller(devices):
    """Return usable bus number when create new SCSI controller."""
    # Every SCSI controller will take a unique bus number
    taken = set(dev.busNumber for dev in devices if _is_scsi_controller(dev))
    # The max bus number for SCSI controllers is 3
    for bus_number in range(constants.SCSI_MAX_CONTROLLER_NUMBER):
        if bus_number not in taken:
            return bus_number
    msg = _('Only %d SCSI controllers are allowed to be '
            'created on this instance.') % constants.SCSI_MAX_CONTROLLER_NUMBER
    raise vexc.VMwareDriverException(msg)
def allocate_controller_key_and_unit_number(client_factory, devices,
                                            adapter_type):
    """This function inspects the current set of hardware devices and returns
    controller_key and unit_number that can be used for attaching a new virtual
    disk to adapter with the given adapter_type.
    """
    if devices.__class__.__name__ == "ArrayOfVirtualDevice":
        devices = devices.VirtualDevice
    taken = _find_allocated_slots(devices)
    slot = None
    if adapter_type == constants.ADAPTER_TYPE_IDE:
        # IDE buses host at most two devices
        ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
        slot = _find_controller_slot(ide_keys, taken, 2)
    elif adapter_type in constants.SCSI_ADAPTER_TYPES:
        scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
        slot = _find_controller_slot(scsi_keys, taken, 16)
    if slot:
        return slot[0], slot[1], None
    # no free slot: create a new controller with the specified type and
    # return its spec (temporary negative key, replaced by the server)
    controller_key = -101
    # Get free bus number for new SCSI controller.
    bus_number = 0
    if adapter_type in constants.SCSI_ADAPTER_TYPES:
        bus_number = _get_bus_number_for_scsi_controller(devices)
    controller_spec = create_controller_spec(client_factory, controller_key,
                                             adapter_type, bus_number)
    return controller_key, 0, controller_spec
def get_rdm_disk(hardware_devices, uuid):
    """Gets the RDM disk key.

    Returns the matching raw-device-mapped VirtualDisk, or None.
    """
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ ==
                "VirtualDiskRawDiskMappingVer1BackingInfo" and
                backing.lunUuid == uuid):
            return device
def get_vmdk_create_spec(client_factory, size_in_kb,
                         adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                         disk_type=constants.DEFAULT_DISK_TYPE):
    """Builds the virtual disk create spec."""
    spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
    spec.adapterType = get_vmdk_adapter_type(adapter_type)
    spec.diskType = disk_type
    spec.capacityKb = size_in_kb
    return spec
def create_virtual_cdrom_spec(client_factory,
                              datastore,
                              controller_key,
                              file_path,
                              cdrom_unit_number):
    """Builds spec for the creation of a new Virtual CDROM to the VM."""
    backing = client_factory.create('ns0:VirtualCdromIsoBackingInfo')
    backing.datastore = datastore
    backing.fileName = file_path
    connect_info = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connect_info.startConnected = True
    connect_info.allowGuestControl = False
    connect_info.connected = True
    cdrom = client_factory.create('ns0:VirtualCdrom')
    cdrom.backing = backing
    cdrom.controllerKey = controller_key
    cdrom.unitNumber = cdrom_unit_number
    # temporary negative key; the server assigns the real one
    cdrom.key = -1
    cdrom.connectable = connect_info
    config_spec = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    config_spec.operation = "add"
    config_spec.device = cdrom
    return config_spec
def _create_virtual_disk_spec(client_factory, controller_key,
                              disk_type=constants.DEFAULT_DISK_TYPE,
                              file_path=None,
                              disk_size=None,
                              linked_clone=False,
                              unit_number=None,
                              device_name=None,
                              disk_io_limits=None):
    """Builds spec for the creation of a new/ attaching of an already existing
    Virtual Disk to the VM.

    :param client_factory: factory for vim objects
    :param controller_key: key of the controller the disk attaches to
    :param disk_type: "rdm"/"rdmp" selects a raw device mapping backing;
        anything else gets a flat-file (FlatVer2) backing
    :param file_path: datastore path of an existing vmdk; None means the
        server creates the backing file
    :param disk_size: capacity in KB (defaults to 0 when None)
    :param linked_clone: when True, back the disk with a parent backing
        and let the server create a fresh delta file
    :param unit_number: unit number on the controller (defaults to 0)
    :param device_name: device name for RDM backings
    :param disk_io_limits: optional IO limits object with has_limits()
    """
    virtual_device_config = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    virtual_device_config.operation = "add"
    # No existing file, or a linked clone delta, means the server must
    # create the backing file as part of the reconfigure.
    if (file_path is None) or linked_clone:
        virtual_device_config.fileOperation = "create"
    virtual_disk = client_factory.create('ns0:VirtualDisk')
    if disk_type == "rdm" or disk_type == "rdmp":
        # Raw device mapping: "rdm" is virtual compatibility mode,
        # "rdmp" is physical (pass-through) mode.
        disk_file_backing = client_factory.create(
            'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
        disk_file_backing.compatibilityMode = "virtualMode" \
            if disk_type == "rdm" else "physicalMode"
        disk_file_backing.diskMode = "independent_persistent"
        disk_file_backing.deviceName = device_name or ""
    else:
        disk_file_backing = client_factory.create(
            'ns0:VirtualDiskFlatVer2BackingInfo')
        disk_file_backing.diskMode = "persistent"
        if disk_type == constants.DISK_TYPE_THIN:
            disk_file_backing.thinProvisioned = True
        else:
            if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
                disk_file_backing.eagerlyScrub = True
    disk_file_backing.fileName = file_path or ""
    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = False
    connectable_spec.connected = True
    if not linked_clone:
        virtual_disk.backing = disk_file_backing
    else:
        # Linked clone: the original backing becomes the parent; the
        # empty fileName makes the server create the child delta disk.
        virtual_disk.backing = copy.copy(disk_file_backing)
        virtual_disk.backing.fileName = ""
        virtual_disk.backing.parent = disk_file_backing
    virtual_disk.connectable = connectable_spec
    # The Server assigns a Key to the device. Here we pass a -ve random key.
    # -ve because actual keys are +ve numbers and we don't
    # want a clash with the key that server might associate with the device
    virtual_disk.key = -100
    virtual_disk.controllerKey = controller_key
    virtual_disk.unitNumber = unit_number or 0
    virtual_disk.capacityInKB = disk_size or 0
    if disk_io_limits and disk_io_limits.has_limits():
        virtual_disk.storageIOAllocation = _get_allocation_info(
            client_factory, disk_io_limits,
            'ns0:StorageIOAllocationInfo')
    virtual_device_config.device = virtual_disk
    return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
    """Builds spec for the detach of an already existing Virtual Disk from VM.

    When destroy_disk is True the backing file is deleted as well.
    """
    spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    spec.operation = "remove"
    spec.device = device
    if destroy_disk:
        spec.fileOperation = "destroy"
    return spec
def clone_vm_spec(client_factory, location,
                  power_on=False, snapshot=None, template=False, config=None):
    """Builds the VM clone spec.

    snapshot and config are only attached when supplied; note that config
    uses an "is not None" test so a falsy-but-real spec is still honoured.
    """
    spec = client_factory.create('ns0:VirtualMachineCloneSpec')
    spec.location = location
    spec.powerOn = power_on
    spec.template = template
    if snapshot:
        spec.snapshot = snapshot
    if config is not None:
        spec.config = config
    return spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
                     disk_move_type="moveAllDiskBackingsAndAllowSharing"):
    """Builds the VM relocation spec.

    host is optional; when omitted the server chooses one.
    """
    spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
    spec.diskMoveType = disk_move_type
    spec.datastore = datastore
    if host:
        spec.host = host
    return spec
def get_machine_id_change_spec(client_factory, machine_id_str):
    """Builds a config spec setting the 'machine.id' extraConfig option."""
    option = client_factory.create('ns0:OptionValue')
    option.key = "machine.id"
    option.value = machine_id_str
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.extraConfig = [option]
    return config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
                                    port_group_name, vlan_id):
    """Builds the virtual switch port group add spec."""
    spec = client_factory.create('ns0:HostPortGroupSpec')
    spec.name = port_group_name
    spec.vswitchName = vswitch_name
    # VLAN ID of 0 means that VLAN tagging is not to be done for the network.
    spec.vlanId = int(vlan_id)
    nic_teaming = client_factory.create('ns0:HostNicTeamingPolicy')
    nic_teaming.notifySwitches = True
    policy = client_factory.create('ns0:HostNetworkPolicy')
    policy.nicTeaming = nic_teaming
    spec.policy = policy
    return spec
def get_vnc_config_spec(client_factory, port):
    """Builds the vnc config spec.

    Enables the VM's built-in VNC server via extraConfig options and
    binds it to *port* with the keymap taken from CONF.vnc.keymap.
    """
    virtual_machine_config_spec = client_factory.create(
        'ns0:VirtualMachineConfigSpec')
    opt_enabled = client_factory.create('ns0:OptionValue')
    opt_enabled.key = "RemoteDisplay.vnc.enabled"
    opt_enabled.value = "true"
    opt_port = client_factory.create('ns0:OptionValue')
    opt_port.key = "RemoteDisplay.vnc.port"
    opt_port.value = port
    opt_keymap = client_factory.create('ns0:OptionValue')
    opt_keymap.key = "RemoteDisplay.vnc.keyMap"
    opt_keymap.value = CONF.vnc.keymap
    extras = [opt_enabled, opt_port, opt_keymap]
    virtual_machine_config_spec.extraConfig = extras
    return virtual_machine_config_spec
def get_vnc_port(session):
    """Return the first free VNC port in the configured range.

    Scans [CONF.vmware.vnc_port, vnc_port + vnc_port_total) against the
    ports already allocated to existing VMs.

    :raises exception.ConsolePortRangeExhausted: when every port in the
        range is taken (it does not return None).
    """
    min_port = CONF.vmware.vnc_port
    port_total = CONF.vmware.vnc_port_total
    allocated_ports = _get_allocated_vnc_ports(session)
    max_port = min_port + port_total
    for port in range(min_port, max_port):
        if port not in allocated_ports:
            return port
    raise exception.ConsolePortRangeExhausted(min_port=min_port,
                                              max_port=max_port)
def _get_allocated_vnc_ports(session):
    """Return an integer set of all allocated VNC ports."""
    # TODO(rgerganov): bug #1256944
    # The VNC port should be unique per host, not per vCenter
    vnc_ports = set()
    # Fetch the VNC-port extraConfig option for every VM, paging through
    # the retrieval results until exhausted.
    result = session._call_method(vim_util, "get_objects",
                                  "VirtualMachine", [VNC_CONFIG_KEY])
    while result:
        for obj in result.objects:
            # VMs without the option have no propSet entry.
            if not hasattr(obj, 'propSet'):
                continue
            dynamic_prop = obj.propSet[0]
            option_value = dynamic_prop.val
            vnc_port = option_value.value
            vnc_ports.add(int(vnc_port))
        result = session._call_method(vutil, 'continue_retrieval',
                                      result)
    return vnc_ports
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_for_optionvalue(results, value):
for object in results.objects:
if hasattr(object, "propSet") and object.propSet:
if object.propSet[0].val.value == value:
return object.obj
def _get_object_from_results(session, results, value, func):
    """Page through retrieval results until *func* extracts a match.

    Cancels the server-side retrieval as soon as a match is found;
    returns None when the pages run out without one.
    """
    while results:
        match = func(results, value)
        if match:
            session._call_method(vutil, 'cancel_retrieval',
                                 results)
            return match
        results = session._call_method(vutil, 'continue_retrieval',
                                       results)
def _get_vm_ref_from_name(session, vm_name):
    """Get reference to the VM with the name specified."""
    # Pulls the name of every VM and filters client-side; expensive on
    # large deployments (same caveat as _get_vm_ref_from_uuid).
    vms = session._call_method(vim_util, "get_objects",
                               "VirtualMachine", ["name"])
    return _get_object_from_results(session, vms, vm_name,
                                    _get_object_for_value)


# NOTE: results are cached per name by the decorator (defined elsewhere
# in this module).
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
    # Try the fast server-side uuid lookup first (vm_name may actually be
    # a uuid), then fall back to the client-side name scan.
    return (_get_vm_ref_from_vm_uuid(session, vm_name) or
            _get_vm_ref_from_name(session, vm_name))
def _get_vm_ref_from_uuid(session, instance_uuid):
    """Get reference to the VM with the uuid specified.

    This method reads all of the names of the VM's that are running
    on the backend, then it filters locally the matching
    instance_uuid. It is far more optimal to use
    _get_vm_ref_from_vm_uuid.
    """
    # Works because nova names VMs after the instance uuid (or includes
    # it); the comparison is against the "name" property.
    vms = session._call_method(vim_util, "get_objects",
                               "VirtualMachine", ["name"])
    return _get_object_from_results(session, vms, instance_uuid,
                                    _get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
    """Get reference to the VM.

    The method will make use of FindAllByUuid to get the VM reference.
    This method finds all VM's on the backend that match the
    instance_uuid, more specifically all VM's on the backend that have
    'config_spec.instanceUuid' set to 'instance_uuid'.

    Returns None when the search finds nothing.
    """
    vm_refs = session._call_method(
        session.vim,
        "FindAllByUuid",
        session.vim.service_content.searchIndex,
        uuid=instance_uuid,
        vmSearch=True,
        instanceUuid=True)
    # Multiple matches are possible; the first one wins.
    if vm_refs:
        return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
    """Get reference to the VM with the uuid specified.

    Matches against the 'nvp.vm-uuid' extraConfig option that nova
    stores on the VM.
    """
    vms = session._call_method(vim_util, "get_objects",
                               "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
    return _get_object_from_results(session, vms, instance_uuid,
                                    _get_object_for_optionvalue)
# NOTE: results are cached per instance by the decorator (defined
# elsewhere in this module).
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
    """Get reference to the VM through uuid or vm name.

    :raises exception.InstanceNotFound: when no VM matches either the
        instance uuid or its name.
    """
    uuid = instance.uuid
    vm_ref = (search_vm_ref_by_identifier(session, uuid) or
              _get_vm_ref_from_name(session, instance.name))
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=uuid)
    return vm_ref
def search_vm_ref_by_identifier(session, identifier):
    """Searches VM reference using the identifier.

    This method is primarily meant to separate out part of the logic for
    vm_ref search that could be use directly in the special case of
    migrating the instance. For querying VM linked to an instance always
    use get_vm_ref instead.
    """
    # Cheapest strategy first: server-side uuid search, then the
    # extraConfig option, then the client-side name scan.
    vm_ref = None
    for lookup in (_get_vm_ref_from_vm_uuid,
                   _get_vm_ref_from_extraconfig,
                   _get_vm_ref_from_uuid):
        vm_ref = lookup(session, identifier)
        if vm_ref:
            break
    return vm_ref
def get_host_ref_for_vm(session, instance):
    """Get a MoRef to the ESXi host currently running an instance."""
    vm_ref = get_vm_ref(session, instance)
    return session._call_method(vutil, "get_object_property",
                                vm_ref, "runtime.host")


def get_host_name_for_vm(session, instance):
    """Get the hostname of the ESXi host currently running an instance."""
    # Two round-trips: resolve the host MoRef, then read its name.
    host_ref = get_host_ref_for_vm(session, instance)
    return session._call_method(vutil, "get_object_property",
                                host_ref, "name")


def get_vm_state(session, instance):
    """Return the VM's runtime.powerState property."""
    vm_ref = get_vm_ref(session, instance)
    vm_state = session._call_method(vutil, "get_object_property",
                                    vm_ref, "runtime.powerState")
    return vm_state
def get_stats_from_cluster(session, cluster):
    """Get the aggregate resource stats of a cluster.

    :param session: vCenter session
    :param cluster: managed object reference of the cluster
    :return: dict of the form
        {'vcpus': <int>, 'mem': {'total': <MB>, 'free': <MB>}}
    """
    vcpus = 0
    mem_info = {'total': 0, 'free': 0}
    # Get the Host and Resource Pool Managed Object Refs
    prop_dict = session._call_method(vutil,
                                     "get_object_properties_dict",
                                     cluster,
                                     ["host", "resourcePool"])
    if prop_dict:
        host_ret = prop_dict.get('host')
        if host_ret:
            host_mors = host_ret.ManagedObjectReference
            result = session._call_method(vim_util,
                         "get_properties_for_a_collection_of_objects",
                         "HostSystem", host_mors,
                         ["summary.hardware", "summary.runtime"])
            for obj in result.objects:
                hardware_summary = obj.propSet[0].val
                runtime_summary = obj.propSet[1].val
                # Only hosts that are connected and not in maintenance
                # mode contribute capacity.
                if (runtime_summary.inMaintenanceMode is False and
                    runtime_summary.connectionState == "connected"):
                    # Total vcpus is the sum of all pCPUs of individual hosts
                    # The overcommitment ratio is factored in by the scheduler
                    vcpus += hardware_summary.numCpuThreads
        res_mor = prop_dict.get('resourcePool')
        if res_mor:
            res_usage = session._call_method(vutil, "get_object_property",
                                             res_mor, "summary.runtime.memory")
            if res_usage:
                # maxUsage is the memory limit of the cluster available to VM's
                mem_info['total'] = int(res_usage.maxUsage / units.Mi)
                # overallUsage is the hypervisor's view of memory usage by VM's
                consumed = int(res_usage.overallUsage / units.Mi)
                mem_info['free'] = mem_info['total'] - consumed
    stats = {'vcpus': vcpus, 'mem': mem_info}
    return stats
def get_host_ref(session, cluster=None):
    """Get reference to a host within the cluster specified.

    When no cluster is given, returns the first HostSystem known to the
    vCenter.

    :raises exception.NoValidHost: when the cluster has no hosts.
    """
    if cluster is None:
        results = session._call_method(vim_util, "get_objects",
                                       "HostSystem")
        # Only the first page is needed; stop the retrieval early.
        session._call_method(vutil, 'cancel_retrieval',
                             results)
        host_mor = results.objects[0].obj
    else:
        host_ret = session._call_method(vutil, "get_object_property",
                                        cluster, "host")
        if not host_ret or not host_ret.ManagedObjectReference:
            msg = _('No host available on cluster')
            raise exception.NoValidHost(reason=msg)
        host_mor = host_ret.ManagedObjectReference[0]
    return host_mor
def propset_dict(propset):
    """Turn a propset list into a dictionary

    PropSet is an optional attribute on ObjectContent objects
    that are returned by the VMware API.
    You can read more about these at:
    | http://pubs.vmware.com/vsphere-51/index.jsp
    | #com.vmware.wssdk.apiref.doc/
    | vmodl.query.PropertyCollector.ObjectContent.html

    :param propset: a property "set" from ObjectContent
    :return: dictionary representing property set
    """
    if propset is None:
        return {}
    return dict((prop.name, prop.val) for prop in propset)
def get_vmdk_backed_disk_device(hardware_devices, uuid):
    """Return the VirtualDisk whose flat-file backing has the given uuid.

    Returns None when no such disk exists.
    """
    # A suds "ArrayOfVirtualDevice" wraps the real list in .VirtualDevice.
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for dev in hardware_devices:
        if dev.__class__.__name__ != "VirtualDisk":
            continue
        backing = dev.backing
        if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo"
                and backing.uuid == uuid):
            return dev
def get_vmdk_volume_disk(hardware_devices, path=None):
    """Return the first VirtualDisk, optionally matching a backing file path.

    With a falsy *path*, any VirtualDisk matches.
    """
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for dev in hardware_devices:
        if dev.__class__.__name__ != "VirtualDisk":
            continue
        if not path or path == dev.backing.fileName:
            return dev
def get_res_pool_ref(session, cluster):
    """Get the resource pool.

    Returns the MoRef of the cluster's root resource pool (the value of
    its 'resourcePool' property).
    """
    # Get the root resource pool of the cluster
    res_pool_ref = session._call_method(vutil,
                                        "get_object_property",
                                        cluster,
                                        "resourcePool")
    return res_pool_ref
def get_all_cluster_mors(session):
    """Get all the clusters in the vCenter.

    :param session: vCenter session
    :return: list of ObjectContent for every ClusterComputeResource;
        an empty list when there are none or when retrieval fails
    """
    try:
        results = session._call_method(vim_util, "get_objects",
                                       "ClusterComputeResource", ["name"])
        # Only the first page is needed; stop the retrieval early.
        session._call_method(vutil, 'cancel_retrieval',
                             results)
        if results.objects is None:
            return []
        else:
            return results.objects
    except Exception as excep:
        LOG.warning(_LW("Failed to get cluster references %s"), excep)
        # Keep the documented list contract so callers can iterate the
        # result without a None check (previously fell through to None).
        return []
def get_cluster_ref_by_name(session, cluster_name):
    """Get reference to the vCenter cluster with the specified name.

    Returns None when no cluster with that name exists or the cluster
    list could not be retrieved.
    """
    all_clusters = get_all_cluster_mors(session)
    # Guard against a None result (get_all_cluster_mors historically
    # returned None on retrieval failure).
    for cluster in all_clusters or []:
        if (hasattr(cluster, 'propSet') and
                cluster.propSet[0].val == cluster_name):
            return cluster.obj
def get_vmdk_adapter_type(adapter_type):
    """Return the adapter type to be used in vmdk descriptor.

    Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic & ParaVirtual
    because Virtual Disk Manager API does not recognize the newer controller
    types.
    """
    unrecognized_types = (constants.ADAPTER_TYPE_LSILOGICSAS,
                          constants.ADAPTER_TYPE_PARAVIRTUAL)
    if adapter_type in unrecognized_types:
        return constants.DEFAULT_ADAPTER_TYPE
    return adapter_type
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
    """Create VM on ESX host.

    :param session: vCenter session
    :param instance: nova instance (used for log context)
    :param vm_folder: folder MoRef the VM is created in
    :param config_spec: VirtualMachineConfigSpec for the new VM
    :param res_pool_ref: resource pool MoRef
    :return: the task result (the new VM's MoRef)
    """
    LOG.debug("Creating VM on the ESX host", instance=instance)
    vm_create_task = session._call_method(
        session.vim,
        "CreateVM_Task", vm_folder,
        config=config_spec, pool=res_pool_ref)
    try:
        task_info = session._wait_for_task(vm_create_task)
    except vexc.VMwareDriverException:
        # An invalid guestId will result in an error with no specific fault
        # type and the generic error 'A specified parameter was not correct'.
        # As guestId is user-editable, we try to help the user out with some
        # additional information if we notice that guestId isn't in our list of
        # known-good values.
        # We don't check this in advance or do anything more than warn because
        # we can't guarantee that our list of known-good guestIds is complete.
        # Consequently, a value which we don't recognise may in fact be valid.
        with excutils.save_and_reraise_exception():
            if config_spec.guestId not in constants.VALID_OS_TYPES:
                LOG.warning(_LW('vmware_ostype from image is not recognised: '
                                '\'%(ostype)s\'. An invalid os type may be '
                                'one cause of this instance creation failure'),
                            {'ostype': config_spec.guestId})
    LOG.debug("Created VM on the ESX host", instance=instance)
    return task_info.result
def destroy_vm(session, instance, vm_ref=None):
    """Destroy a VM instance. Assumes VM is powered off.

    Best-effort: any failure is logged (with traceback) and swallowed
    rather than propagated, so cleanup paths can continue.
    """
    try:
        if not vm_ref:
            vm_ref = get_vm_ref(session, instance)
        LOG.debug("Destroying the VM", instance=instance)
        destroy_task = session._call_method(session.vim, "Destroy_Task",
                                            vm_ref)
        session._wait_for_task(destroy_task)
        LOG.info(_LI("Destroyed the VM"), instance=instance)
    except Exception:
        LOG.exception(_LE('Destroy VM failed'), instance=instance)
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
                        virtual_disk_path, size_in_kb):
    """Create a virtual disk file at *virtual_disk_path* in the datacenter.

    :param session: vCenter session
    :param dc_ref: datacenter MoRef the path is resolved against
    :param adapter_type: storage adapter type for the vmdk descriptor
    :param disk_type: provisioning type (thin/thick/...)
    :param virtual_disk_path: datastore path for the new vmdk
    :param size_in_kb: capacity in KB
    """
    # Create a Virtual Disk of the size of the flat vmdk file. This is
    # done just to generate the meta-data file whose specifics
    # depend on the size of the disk, thin/thick provisioning and the
    # storage adapter type.
    LOG.debug("Creating Virtual Disk of size "
              "%(vmdk_file_size_in_kb)s KB and adapter type "
              "%(adapter_type)s on the data store",
              {"vmdk_file_size_in_kb": size_in_kb,
               "adapter_type": adapter_type})
    vmdk_create_spec = get_vmdk_create_spec(
        session.vim.client.factory,
        size_in_kb,
        adapter_type,
        disk_type)
    vmdk_create_task = session._call_method(
        session.vim,
        "CreateVirtualDisk_Task",
        session.vim.service_content.virtualDiskManager,
        name=virtual_disk_path,
        datacenter=dc_ref,
        spec=vmdk_create_spec)
    session._wait_for_task(vmdk_create_task)
    LOG.debug("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
              " KB and type %(disk_type)s",
              {"vmdk_file_size_in_kb": size_in_kb,
               "disk_type": disk_type})
def copy_virtual_disk(session, dc_ref, source, dest):
    """Copy a sparse virtual disk to a thin virtual disk.

    This is also done to generate the meta-data file whose specifics
    depend on the size of the disk, thin/thick provisioning and the
    storage adapter type.

    :param session: - session for connection
    :param dc_ref: - data center reference object
    :param source: - source datastore path
    :param dest: - destination datastore path
    :returns: None
    """
    LOG.debug("Copying Virtual Disk %(source)s to %(dest)s",
              {'source': source, 'dest': dest})
    vim = session.vim
    # No destSpec is passed; the Virtual Disk Manager picks the
    # destination format.
    vmdk_copy_task = session._call_method(
        vim,
        "CopyVirtualDisk_Task",
        vim.service_content.virtualDiskManager,
        sourceName=source,
        sourceDatacenter=dc_ref,
        destName=dest)
    session._wait_for_task(vmdk_copy_task)
    LOG.debug("Copied Virtual Disk %(source)s to %(dest)s",
              {'source': source, 'dest': dest})
def reconfigure_vm(session, vm_ref, config_spec):
    """Reconfigure a VM according to the config spec.

    Blocks until the ReconfigVM task completes.
    """
    reconfig_task = session._call_method(session.vim,
                                         "ReconfigVM_Task", vm_ref,
                                         spec=config_spec)
    session._wait_for_task(reconfig_task)
def power_on_instance(session, instance, vm_ref=None):
    """Power on the specified instance.

    Idempotent: an InvalidPowerStateException (VM already on) is
    swallowed and only logged.
    """
    if vm_ref is None:
        vm_ref = get_vm_ref(session, instance)
    LOG.debug("Powering on the VM", instance=instance)
    try:
        poweron_task = session._call_method(
            session.vim,
            "PowerOnVM_Task", vm_ref)
        session._wait_for_task(poweron_task)
        LOG.debug("Powered on the VM", instance=instance)
    except vexc.InvalidPowerStateException:
        LOG.debug("VM already powered on", instance=instance)
def _get_vm_port_indices(session, vm_ref):
    """Return the port indices in use by the VM's network interfaces.

    Indices come from 'nvp.iface-id.<N>' extraConfig options whose value
    is not the 'free' marker.
    """
    extra_config = session._call_method(vutil,
                                        'get_object_property',
                                        vm_ref,
                                        'config.extraConfig')
    if extra_config is None:
        return []
    return [int(option.key.split('.')[2])
            for option in extra_config.OptionValue
            if (option.key.startswith('nvp.iface-id.') and
                option.value != 'free')]
def get_attach_port_index(session, vm_ref):
    """Get the first free port index."""
    ports = sorted(_get_vm_port_indices(session, vm_ref))
    # With the indices sorted, the first position whose value differs
    # from its index is a hole left by a detached interface.
    for expected, used in enumerate(ports):
        if expected != used:
            return expected
    # No hole found (this also covers the no-ports case): append at the end.
    return len(ports)
def get_vm_detach_port_index(session, vm_ref, iface_id):
    """Return the port index bound to *iface_id*, or None if not found.

    Scans the VM's 'nvp.iface-id.<N>' extraConfig options for the one
    whose value equals the interface id.
    """
    extra_config = session._call_method(vutil,
                                        'get_object_property',
                                        vm_ref,
                                        'config.extraConfig')
    if extra_config is not None:
        options = extra_config.OptionValue
        for option in options:
            if (option.key.startswith('nvp.iface-id.') and
                option.value == iface_id):
                return int(option.key.split('.')[2])
def power_off_instance(session, instance, vm_ref=None):
    """Power off the specified instance.

    Idempotent: an InvalidPowerStateException (VM already off) is
    swallowed and only logged.
    """
    if vm_ref is None:
        vm_ref = get_vm_ref(session, instance)
    LOG.debug("Powering off the VM", instance=instance)
    try:
        poweroff_task = session._call_method(session.vim,
                                             "PowerOffVM_Task", vm_ref)
        session._wait_for_task(poweroff_task)
        LOG.debug("Powered off the VM", instance=instance)
    except vexc.InvalidPowerStateException:
        LOG.debug("VM already powered off", instance=instance)
def find_rescue_device(hardware_devices, instance):
    """Returns the rescue device.

    The method will raise an exception if the rescue device does not
    exist. The rescue device has suffix '-rescue.vmdk'.

    :param hardware_devices: the hardware devices for the instance
    :param instance: nova.objects.instance.Instance object
    :return: the rescue disk device object
    :raises exception.NotFound: when no rescue disk is attached
    """
    rescue_suffix = '-rescue.vmdk'
    for device in hardware_devices.VirtualDevice:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ == 'VirtualDiskFlatVer2BackingInfo'
                and backing.fileName.endswith(rescue_suffix)):
            return device
    msg = _('Rescue device does not exist for instance %s') % instance.uuid
    raise exception.NotFound(msg)
def get_ephemeral_name(id):
    """Return the canonical vmdk file name for ephemeral disk number *id*."""
    return 'ephemeral_%d.vmdk' % id
def _detach_and_delete_devices_config_spec(client_factory, devices):
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
for device in devices:
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
virtual_device_config.fileOperation = "destroy"
device_config_spec.append(virtual_device_config)
config_spec.deviceChange = device_config_spec
return config_spec
def detach_devices_from_vm(session, vm_ref, devices):
    """Detach specified devices from VM.

    Also destroys the devices' backing files (see
    _detach_and_delete_devices_config_spec). Blocks until the reconfigure
    task completes.
    """
    client_factory = session.vim.client.factory
    config_spec = _detach_and_delete_devices_config_spec(
        client_factory, devices)
    reconfigure_vm(session, vm_ref, config_spec)
def get_ephemerals(session, vm_ref):
    """Return all flat-backed disks whose file name marks them as ephemeral."""
    hardware_devices = session._call_method(vutil,
                                            "get_object_property",
                                            vm_ref,
                                            "config.hardware.device")
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    return [dev for dev in hardware_devices
            if dev.__class__.__name__ == "VirtualDisk"
            and (dev.backing.__class__.__name__ ==
                 "VirtualDiskFlatVer2BackingInfo")
            and 'ephemeral' in dev.backing.fileName]
def get_swap(session, vm_ref):
    """Return the VM's flat-backed disk whose file name contains 'swap'.

    Returns None when no such disk is attached.
    """
    hardware_devices = session._call_method(vutil, "get_object_property",
                                            vm_ref, "config.hardware.device")
    # A suds "ArrayOfVirtualDevice" wraps the real list in .VirtualDevice.
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for device in hardware_devices:
        if (device.__class__.__name__ == "VirtualDisk" and
                device.backing.__class__.__name__ ==
                "VirtualDiskFlatVer2BackingInfo" and
                'swap' in device.backing.fileName):
            return device
def _get_folder(session, parent_folder_ref, name):
    """Return the child Folder named *name* under the parent, or None."""
    # Get list of child entities for the parent folder
    prop_val = session._call_method(vutil, 'get_object_property',
                                    parent_folder_ref,
                                    'childEntity')
    if prop_val:
        child_entities = prop_val.ManagedObjectReference
        # Return if the child folder with input name is already present
        for child_entity in child_entities:
            # Children can also be VMs, datastores etc.; only folders count.
            if child_entity._type != 'Folder':
                continue
            child_entity_name = vim_util.get_entity_name(session, child_entity)
            if child_entity_name == name:
                return child_entity
def create_folder(session, parent_folder_ref, name):
    """Creates a folder in vCenter

    A folder of 'name' will be created under the parent folder.
    The moref of the folder is returned.

    Idempotent: returns the existing folder when one with that name is
    already present, including when another caller creates it between
    our existence check and CreateFolder (DuplicateName race).
    """
    folder = _get_folder(session, parent_folder_ref, name)
    if folder:
        return folder
    LOG.debug("Creating folder: %(name)s. Parent ref: %(parent)s.",
              {'name': name, 'parent': parent_folder_ref.value})
    try:
        folder = session._call_method(session.vim, "CreateFolder",
                                      parent_folder_ref, name=name)
        LOG.info(_LI("Created folder: %(name)s in parent %(parent)s."),
                 {'name': name, 'parent': parent_folder_ref.value})
    except vexc.DuplicateName as e:
        LOG.debug("Folder already exists: %(name)s. Parent ref: %(parent)s.",
                  {'name': name, 'parent': parent_folder_ref.value})
        # The fault carries the existing folder's moref value.
        val = e.details['object']
        folder = vutil.get_moref(val, 'Folder')
    return folder
def folder_ref_cache_update(path, folder_ref):
    """Cache the moref of the folder at *path* in the module-level map."""
    _FOLDER_PATH_REF_MAPPING[path] = folder_ref


def folder_ref_cache_get(path):
    """Return the cached folder moref for *path*, or None if not cached."""
    return _FOLDER_PATH_REF_MAPPING.get(path)
def _get_vm_name(display_name, id):
if display_name:
return '%s (%s)' % (display_name[:41], id[:36])
else:
return id[:36]
def rename_vm(session, vm_ref, instance):
    """Rename the VM in vCenter to '<display_name[:41]> (<uuid>)'.

    Blocks until the Rename task completes.
    """
    vm_name = _get_vm_name(instance.display_name, instance.uuid)
    rename_task = session._call_method(session.vim, "Rename_Task", vm_ref,
                                       newName=vm_name)
    session._wait_for_task(rename_task)
| |
#!/usr/bin/env python
# Contributors:
# Christopher P. Barnes <senrabc@gmail.com>
# Andrei Sura: github.com/indera
# Mohan Das Katragadda <mohan.das142@gmail.com>
# Philip Chase <philipbchase@gmail.com>
# Ruchi Vivek Desai <ruchivdesai@gmail.com>
# Taeber Rapczak <taeber@ufl.edu>
# Nicholas Rejack <nrejack@ufl.edu>
# Josh Hanna <josh@hanna.io>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
import unittest
import os
import logging
import shutil
import tempfile
import pysftp
from mock import patch
from redi.utils import GetEmrData
from redi.utils.GetEmrData import EmrFileAccessDetails
import time
from subprocess import Popen
# Try to silence extra info from the ssh/sftp libraries.
# NOTE: addHandler needs a handler *instance*; passing the NullHandler
# class (and the misspelled "paramico" logger name) silently did nothing.
logging.getLogger("paramiko").addHandler(logging.NullHandler())
logging.getLogger("pysftp").addHandler(logging.NullHandler())
class TestGetEMRData(unittest.TestCase):
    """Tests for redi.utils.GetEmrData: csv -> xml transformation and the
    sftp transfer path."""
    def setUp(self):
        pass
    # Stand-in for patched callables (pysftp.Connection and
    # GetEmrData.download_files) so no network access happens. Declared
    # without 'self' on purpose: *args absorbs whatever is passed.
    def _noop(*args, **kwargs):
        pass
    @patch.multiple(pysftp, Connection=_noop)
    @patch.multiple(GetEmrData, download_files=_noop)
    def test_get_emr_data(self):
        """
        This test verifies only that the csv file on the sftp server
        can be transformed to an xml file.
        Note: This test is not concerned with testing the sftp communication"""
        temp_folder = tempfile.mkdtemp('/')
        temp_txt_file = os.path.join(temp_folder, "raw.txt")
        temp_escaped_file = os.path.join(temp_folder, "rawEscaped.txt")
        temp_xml_file = os.path.join(temp_folder, "raw.xml")
        # Raw EMR csv; deliberately contains <, >, & and => so the
        # escaping step is exercised.
        input_string = '''"NAME","COMPONENT_ID","RESULT","REFERENCE_UNIT","DATE_TIME_STAMP","STUDY_ID"
"RNA","1905","<5","IU/mL","1907-05-21 05:50:00","999-0059"
"EGFR","1740200","eGFR result is => 60 ml/min/1.73M2","ml/min/1.73M2","1903-11-27 15:13:00","999-0059"
"HEMATOCRIT","1534436",">27&<30","%","","999-0059"'''
        with open(temp_txt_file, 'w+') as f:
            f.write(input_string)
        props = EmrFileAccessDetails(
            emr_sftp_project_name='/',
            emr_download_list='raw.csv',
            emr_host='localhost',
            emr_username='admin',
            emr_password='admin',
            emr_port='7788',
            emr_private_key=None,
            emr_private_key_pass=None,
        )
        # The download step is a no-op (patched); only the local
        # transformation pipeline below actually runs.
        GetEmrData.get_emr_data(temp_folder, props)
        GetEmrData.data_preprocessing(temp_txt_file, temp_escaped_file)
        GetEmrData.generate_xml(temp_escaped_file, temp_xml_file)
        with open(temp_xml_file) as f:
            result = f.read()
        expected = '''<?xml version="1.0" encoding="utf8"?>
<study>
<subject>
<NAME>RNA</NAME>
<COMPONENT_ID>1905</COMPONENT_ID>
<RESULT><5</RESULT>
<REFERENCE_UNIT>IU/mL</REFERENCE_UNIT>
<DATE_TIME_STAMP>1907-05-21 05:50:00</DATE_TIME_STAMP>
<STUDY_ID>999-0059</STUDY_ID>
</subject>
<subject>
<NAME>EGFR</NAME>
<COMPONENT_ID>1740200</COMPONENT_ID>
<RESULT>eGFR result is => 60 ml/min/1.73M2</RESULT>
<REFERENCE_UNIT>ml/min/1.73M2</REFERENCE_UNIT>
<DATE_TIME_STAMP>1903-11-27 15:13:00</DATE_TIME_STAMP>
<STUDY_ID>999-0059</STUDY_ID>
</subject>
<subject>
<NAME>HEMATOCRIT</NAME>
<COMPONENT_ID>1534436</COMPONENT_ID>
<RESULT>>27&<30</RESULT>
<REFERENCE_UNIT>%</REFERENCE_UNIT>
<DATE_TIME_STAMP></DATE_TIME_STAMP>
<STUDY_ID>999-0059</STUDY_ID>
</subject>
</study>
'''
        self.assertEqual(result, expected)
        shutil.rmtree(temp_folder)
    def test_pysftp_using_rsa_key(self):
        """
        Starts a sftp server and transfers a file
        to verify the connection using a private key.
        Notes:
            - Disble this test if running on the Travis VM fails
            by adding one of the following annotations:
                @unittest.skip("Unconditional skipping")
                @unittest.skipIf(os.getenv('CI', '') > '', reason='Travis VM')
            - Dependency: `sudo easy_install sftpserver`
        """
        # Using a temp folder does not work for some reason
        #temp_folder = tempfile.mkdtemp('/')
        temp_folder = "."
        key_file = os.path.join(temp_folder, 'unittest_pysftp_rsa_key')
        key_file = create_rsa_key(key_file)
        source_file = os.path.join(temp_folder, 'source_file')
        source_file = create_sample_file(source_file)
        # At this point the destination file is empty
        destination_file = os.path.join(temp_folder, 'destination_file')
        proc = None
        try:
            sftp_cmd = "sftpserver --host localhost -p 7788 -l DEBUG -k " + key_file
            proc = Popen(sftp_cmd, shell=True)
            time.sleep(1)  # let the server start
            try:
                # this block depends on a running sftp server
                connection_info = get_connection_info(key_file)
                connection_info['port'] = int(connection_info['port'])
                with pysftp.Connection(**connection_info) as sftp:
                    logging.info("User %s connected to sftp server %s" % \
                        (connection_info['username'], connection_info['host']))
                    #print sftp.listdir('.')
                    sftp.get(source_file, destination_file)
            except Exception as e:
                # NOTE(review): connection_info may be unbound here if
                # get_connection_info itself raised — confirm.
                logging.error("Problem connecting to: %s due error: %s" % \
                    (connection_info['host'], e))
        except Exception as e:
            logging.error("Problem starting sftp server due error: %s for file: %s" % \
                (e, destination_file))
        finally:
            # Always stop the spawned sftp server process.
            if proc:
                proc.terminate()
        with open(destination_file) as fresh_file:
            actual = fresh_file.read()
        self.assertEqual(actual, "SFTP TEST")
        # Cleanup created files
        os.remove(source_file)
        os.remove(destination_file)
        os.remove(key_file)
        os.remove(key_file + ".pub")
def create_rsa_key(rsa_key_file):
    """Create an RSA private key pair to be used by sftp.

    :param rsa_key_file: path of the private key; ssh-keygen also writes
        `<path>.pub`
    :return: the key file path, whether freshly created or pre-existing
    """
    if not os.path.isfile(rsa_key_file):
        # Argument-list form (shell=False) avoids quoting problems when
        # the path contains spaces or shell metacharacters.
        cmd = ["ssh-keygen", "-q", "-t", "rsa", "-N", "", "-f", rsa_key_file]
        proc = Popen(cmd)
        time.sleep(1)  # crude wait for ssh-keygen to finish writing
        proc.terminate()
    else:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning("RSA key file already exists: %s" % rsa_key_file)
    return rsa_key_file
def create_sample_file(sample_file):
    """Create a sample file to be transferred over sftp.

    :param sample_file: path of the file to create
    :return: the path, whether freshly created or pre-existing
    """
    if not os.path.isfile(sample_file):
        try:
            # 'with' guarantees the handle is closed even on write errors;
            # the old `except IOError as (errno, strerror)` clause was
            # Python-2-only syntax.
            with open(sample_file, 'w+') as f:
                f.write("SFTP TEST")
        except IOError as err:
            logging.error("I/O error({0}): {1}".format(err.errno,
                                                       err.strerror))
    else:
        logging.info("Sample file %s already exists" % sample_file)
    return sample_file
def get_connection_info(private_key):
    """Return a dictionary of parameters for creating a sftp connection"""
    access_details = EmrFileAccessDetails(
        emr_sftp_project_name='/',
        emr_download_list='raw.csv',
        emr_host='localhost',
        emr_username='admin',
        emr_password='admin',
        emr_port='7788',
        emr_private_key=private_key,
        emr_private_key_pass=None,
    )
    # The remaining keys are passed straight to pysftp.Connection(**kwargs),
    # so any attribute that is not a Connection parameter must be removed.
    connection_info = dict(access_details.__dict__)
    # delete unnecessary elements from the dictionary
    del connection_info['sftp_project_name']
    del connection_info['download_list']
    return connection_info
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
import imp
import os
import re
import tempfile
import shutil
from mock import *
from gp_unittest import *
from gppylib.gparray import Segment, GpArray
from gppylib.db.dbconn import UnexpectedRowsError
from pygresql import pgdb
# Regexes matching the shapes of the SQL statements gptransfer issues, used
# by the tests to route mocked cursor results by query. Raw strings keep
# the regex escapes (\., \() from being treated as (invalid) string escapes,
# which raises SyntaxWarning/DeprecationWarning on modern Python.
cursor_keys = dict(
    normal_tables=re.compile(r".*n\.nspname, c\.relname, c\.relstorage.*c\.oid NOT IN \( SELECT parchildrelid.*"),
    partition_tables=re.compile(r".*n\.nspname, c\.relname, c\.relstorage(?!.*SELECT parchildrelid).*"),
    relations=re.compile(r".*select relname from pg_class r.*"),
    table_info=re.compile(r".*select is_nullable, data_type, character_maximum_length,.*"),
    partition_info=re.compile(r".*select parkind, parlevel, parnatts, paratts.*"),
    schema_name=re.compile(r".*SELECT spcname FROM pg_catalog.pg_tablespace.*"),
    create_schema=re.compile(r".*CREATE SCHEMA.*"),
    ordinal_pos=re.compile(r".*select ordinal_position from.*"),
    attname=re.compile(r".*SELECT attname.*"),
)
class GpTransfer(GpTestCase):
TEMP_DIR = "/tmp/test_unit_gptransfer"
    def setUp(self):
        """Build a fully mocked gptransfer environment for each test.

        Loads the extensionless gptransfer script via imp, then patches out
        every external touch point (SQL execution, DB connections, the
        worker pool, remote filesystem checks) so tests drive behavior purely
        through canned cursor/singleton side effects.
        """
        # Scratch directory for the temp input/exclude files tests create.
        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)
        # because gptransfer does not have a .py extension,
        # we have to use imp to import it
        # if we had a gptransfer.py, this is equivalent to:
        # import gptransfer
        # self.subject = gptransfer
        gptransfer_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gptransfer")
        self.subject = imp.load_source('gptransfer', gptransfer_file)
        # Spec'd logger mock: tests assert on warn/info/error message lists.
        self.subject.logger = Mock(spec=['log', 'warn', 'info', 'debug', 'error', 'warning'])
        self.gparray = self.createGpArrayWith2Primary2Mirrors()
        self.db_connection = MagicMock()
        # TODO: We should be using a spec here, but I haven't been able to narrow down exactly which call
        # is causing an attribute error when using the spec.
        # The error is occuring because we don't mock out every possible SQL command, and some get swallowed
        # (which is fine so far), but to fully support specs
        # we need to go through and mock all the SQL calls
        # self.db_connection = MagicMock(spec=["__exit__", "close", "__enter__", "commit", "rollback"])
        self.cursor = MagicMock(spec=pgdb.pgdbCursor)
        self.db_singleton = Mock()
        self.workerpool = MagicMock()
        # Empty work queue so runs complete immediately.
        self.workerpool.work_queue.qsize.return_value = 0
        self.apply_patches([
            patch('os.environ', new={"GPHOME": "my_gp_home"}),
            patch('gptransfer.connect', return_value=self.db_connection),
            patch('gppylib.gparray.GpArray.initFromCatalog', return_value=self.gparray),
            patch('gptransfer.getUserDatabaseList', return_value=[["my_first_database"], ["my_second_database"]]),
            patch('gppylib.db.dbconn.connect', return_value=self.db_connection),
            patch('gptransfer.WorkerPool', return_value=self.workerpool),
            patch('gptransfer.doesSchemaExist', return_value=False),
            patch('gptransfer.dropSchemaIfExist'),
            patch('gptransfer.execSQL', new=self.cursor),
            patch('gptransfer.execSQLForSingletonRow', new=self.db_singleton),
            patch("gppylib.commands.unix.FileDirExists.remote", return_value=True),
            patch("gptransfer.wait_for_pool", return_value=([], [])),
        ])
        # We have a GIGANTIC class that uses 31 arguments, so pre-setting this
        # here
        self.GpTransferCommand_args = dict(
            name='foo',
            src_host='foo',
            src_port='foo',
            src_user='foo',
            dest_host='foo',
            dest_port='foo',
            dest_user='foo',
            table_pair='foo',
            dest_exists='foo',
            truncate='foo',
            analyze='foo',
            drop='foo',
            fast_mode='foo',
            exclusive_lock='foo',
            schema_only='foo',
            work_dir='foo',
            host_map='foo',
            source_config='foo',
            batch_size='foo',
            gpfdist_port='foo',
            gpfdist_last_port='foo',
            gpfdist_instance_count='foo',
            gpfdist_verbosity='foo',
            max_line_length='foo',
            timeout='foo',
            wait_time='foo',
            delimiter='foo',
            validator='foo',
            format='foo',
            quote='foo',
            table_transfer_set_total='foo')
        # Baseline option values mirroring gptransfer's command-line
        # defaults; the setup_* helpers copy and tweak these per scenario.
        self.GpTransfer_options_defaults = dict(
            analyze=False,
            base_port=8000,
            batch_size=2,
            databases=[],
            delimiter=',',
            dest_database=None,
            dest_host='127.0.0.1',
            dest_port=5432,
            dest_user='gpadmin',
            drop=False,
            dry_run=False,
            enable_test=False,
            exclude_input_file=None,
            exclude_tables=[],
            exclusive_lock=False,
            force_standard_mode=False,
            format='CSV',
            full=False,
            input_file=None,
            interactive=False,
            last_port=-1,
            logfileDirectory=None,
            max_gpfdist_instances=1,
            max_line_length=10485760,
            no_final_count_validation=False,
            partition_transfer=False,
            partition_transfer_non_pt_target=False,
            quiet=None,
            quote='\x01',
            schema_only=False,
            skip_existing=False,
            source_host='127.0.0.1',
            source_map_file=None,
            source_port=5432,
            source_user='gpadmin',
            sub_batch_size=25,
            tables=[],
            timeout=300,
            truncate=False,
            validator=None,
            verbose=None,
            gpfdist_verbose=False,
            gpfdist_very_verbose=False,
            wait_time=3,
            work_base_dir='/home/gpadmin/',
        )
    def tearDown(self):
        # Remove the scratch directory created in setUp, then let the base
        # class undo the patches.  Note: `GpTransfer` here names this test
        # class, not the product class of the same name.
        shutil.rmtree(self.TEMP_DIR)
        super(GpTransfer, self).tearDown()
    def test__GpCreateGpfdist(self):
        # Default verbosity: the generated nohup command carries no -v/-V
        # flag.  The command also backgrounds gpfdist, records its pid, and
        # fails fast (dumping the log) if the daemon dies within a second.
        cmd = self.subject.GpCreateGpfdist(
            'gpfdist for table %s on %s' % ("some table",
                                            "some segment"),
            "some dirname",
            "some datafile",
            0,
            1,
            2,
            3,
            "pid_file",
            "log_file",
            ctxt=self.subject.REMOTE,
            remoteHost="address")
        self.assertEqual('nohup gpfdist -d some dirname -p 0 -P 1 -m 2 -t 3 > log_file 2>&1 < /dev/null & echo \\$! ' \
                         '> pid_file && bash -c "(sleep 1 && kill -0 \\`cat pid_file 2> /dev/null\\` && cat log_file) ' \
                         '|| (cat log_file >&2 && exit 1)"', cmd.cmdStr)

    def test__GpCreateGpfdist_with_verbose(self):
        # "-v " verbosity string is spliced into the gpfdist invocation.
        cmd = self.subject.GpCreateGpfdist(
            'gpfdist for table %s on %s' % ("some table",
                                            "some segment"),
            "some dirname",
            "some datafile",
            0,
            1,
            2,
            3,
            "pid_file",
            "log_file",
            ctxt = self.subject.REMOTE,
            remoteHost = "address",
            verbosity = "-v ")
        self.assertEqual('nohup gpfdist -d some dirname -p 0 -P 1 -m 2 -t 3 -v > log_file 2>&1 < /dev/null & echo \\$! ' \
                         '> pid_file && bash -c "(sleep 1 && kill -0 \\`cat pid_file 2> /dev/null\\` && cat log_file) ' \
                         '|| (cat log_file >&2 && exit 1)"', cmd.cmdStr)

    def test__GpCreateGpfdist_with_very_verbose(self):
        # "-V " verbosity string is spliced into the gpfdist invocation.
        cmd = self.subject.GpCreateGpfdist(
            'gpfdist for table %s on %s' % ("some table",
                                            "some segment"),
            "some dirname",
            "some datafile",
            0,
            1,
            2,
            3,
            "pid_file",
            "log_file",
            ctxt = self.subject.REMOTE,
            remoteHost = "address",
            verbosity = "-V ")
        self.assertEqual('nohup gpfdist -d some dirname -p 0 -P 1 -m 2 -t 3 -V > log_file 2>&1 < /dev/null & echo \\$! ' \
                         '> pid_file && bash -c "(sleep 1 && kill -0 \\`cat pid_file 2> /dev/null\\` && cat log_file) ' \
                         '|| (cat log_file >&2 && exit 1)"', cmd.cmdStr)
    # cleanup() only warns about retained gpfdist logs when gpfdist ran with
    # some verbosity; os._exit is patched so cleanup() cannot kill the test
    # runner.
    @patch('os._exit')
    def test__cleanup_with_gpfdist_no_verbose_or_very_verbose_does_not_show_gpfdist_warning(self, mock1):
        options = self.setup_normal_to_normal_validation()
        self.subject.GpTransfer(Mock(**options), []).cleanup()
        warnings = self.get_warnings()
        self.assertNotIn("gpfdist logs are present in %s on all hosts in the source", warnings)

    @patch('os._exit')
    def test__cleanup_with_gpfdist_verbose_shows_gpfdist_warning(self, mock1):
        options = self.setup_normal_to_normal_validation()
        options.update(gpfdist_verbose=True)
        self.subject.GpTransfer(Mock(**options), []).cleanup()
        warnings = self.get_warnings()
        self.assertIn("gpfdist logs are present in %s on all hosts in the source", warnings)

    @patch('os._exit')
    def test__cleanup_with_gpfdist_very_verbose_shows_gpfdist_warning(self, mock1):
        options = self.setup_normal_to_normal_validation()
        options.update(gpfdist_very_verbose=True)
        self.subject.GpTransfer(Mock(**options), []).cleanup()
        warnings = self.get_warnings()
        self.assertIn("gpfdist logs are present in %s on all hosts in the source", warnings)
    @patch('gptransfer.TableValidatorFactory', return_value=Mock())
    def test__get_distributed_by_quotes_column_name(self, mock1):
        # A single distribution column must come back double-quoted.
        gptransfer = self.subject
        cmd_args = self.GpTransferCommand_args
        src_args = ('src', 'public', 'foo', False)
        dest_args = ('dest', 'public', 'foo', False)
        source_table = gptransfer.GpTransferTable(*src_args)
        dest_table = gptransfer.GpTransferTable(*dest_args)
        cmd_args['table_pair'] = gptransfer.GpTransferTablePair(source_table, dest_table)
        side_effect = CursorSideEffect()
        side_effect.append_regexp_key(cursor_keys['attname'], [['escaped_string']])
        self.cursor.side_effect = side_effect.cursor_side_effect
        table_validator = gptransfer.GpTransferCommand(**cmd_args)
        expected_distribution = '''DISTRIBUTED BY ("escaped_string")'''
        self.assertEqual(expected_distribution, table_validator._get_distributed_by())

    @patch('gptransfer.TableValidatorFactory', return_value=Mock())
    def test__get_distributed_by_quotes_multiple_column_names(self, mock1):
        # Multiple distribution columns: each quoted, comma-joined.
        gptransfer = self.subject
        cmd_args = self.GpTransferCommand_args
        src_args = ('src', 'public', 'foo', False)
        dest_args = ('dest', 'public', 'foo', False)
        source_table = gptransfer.GpTransferTable(*src_args)
        dest_table = gptransfer.GpTransferTable(*dest_args)
        cmd_args['table_pair'] = gptransfer.GpTransferTablePair(source_table, dest_table)
        side_effect = CursorSideEffect()
        side_effect.append_regexp_key(cursor_keys['attname'], [['first_escaped_value'], ['second_escaped_value']])
        self.cursor.side_effect = side_effect.cursor_side_effect
        table_validator = gptransfer.GpTransferCommand(**cmd_args)
        expected_distribution = '''DISTRIBUTED BY ("first_escaped_value", "second_escaped_value")'''
        self.assertEqual(expected_distribution, table_validator._get_distributed_by())

    @patch('gptransfer.TableValidatorFactory', return_value=Mock())
    def test__get_distributed_randomly_when_no_distribution_keys(self, mock1):
        # No attname rows at all -> DISTRIBUTED RANDOMLY, without logging.
        side_effect = CursorSideEffect()
        side_effect.append_regexp_key(cursor_keys['attname'], [])
        self.cursor.side_effect = side_effect.cursor_side_effect
        table_validator = self._get_gptransfer_command()
        expected_distribution = '''DISTRIBUTED RANDOMLY'''
        result_distribution = table_validator._get_distributed_by()
        self.assertEqual(0, len(self.subject.logger.method_calls))
        self.assertEqual(expected_distribution, result_distribution)

    @patch('gptransfer.TableValidatorFactory', return_value=Mock())
    def test_get_distributed_randomly_handles_exception(self, mock1):
        # A failing cursor falls back to DISTRIBUTED RANDOMLY and logs once.
        self.cursor.side_effect = ""
        table_validator = self._get_gptransfer_command()
        expected_distribution = '''DISTRIBUTED RANDOMLY'''
        result_distribution = table_validator._get_distributed_by()
        self.assertEqual(1, len(self.subject.logger.method_calls))
        self.assertEqual(expected_distribution, result_distribution)
    def test__normal_transfer_no_tables_does_nothing_but_log(self):
        # An input file naming only nonexistent tables should exit after
        # logging that nothing is transferable.
        options = self.setup_normal_to_normal_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write("my_first_database.public.nonexistent_table")
        with self.assertRaises(SystemExit):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_info_messages()
        self.assertIn("Found no tables to transfer.", log_messages[-1])

    def test__normal_transfer_with_tables_validates(self):
        options = self.setup_normal_to_normal_validation()
        self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_info_messages()
        self.assertIn("Validating transfer table set...", log_messages)

    def test__normal_transfer_when_destination_table_already_exists_fails(self):
        # Destination already has the table -> constructor raises.
        options = self.setup_normal_to_normal_validation()
        additional = {
            cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
        }
        self.cursor.side_effect = CursorSideEffect(additional=additional).cursor_side_effect
        with self.assertRaisesRegexp(Exception, "Table my_first_database.public.my_normal_table exists in "
                                                "database my_first_database"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__normal_transfer_when_input_file_bad_format_comma_fails(self):
        # Comma-separated destination tables are only legal in
        # partition-transfer mode.
        options = self.setup_normal_to_normal_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write("my_first_database.public.my_table, my_second_database.public.my_table")
        self.cursor.side_effect = CursorSideEffect().cursor_side_effect
        with self.assertRaisesRegexp(Exception, "Destination tables \(comma separated\) are only allowed for "
                                                "partition tables"):
            self.subject.GpTransfer(Mock(**options), [])
    @patch('gptransfer.CountTableValidator.accumulate', side_effect=Exception('BOOM'))
    def test__final_count_validation_when_throws_should_raises_exception(self, mock1):
        # An exploding validator surfaces as a "Final count validation
        # failed" error from run().
        options = self.setup_normal_to_normal_validation()
        with self.assertRaisesRegexp(Exception, "Final count validation failed"):
            self.subject.GpTransfer(Mock(**options), []).run()

    def test__final_count_invalid_one_src_one_dest_table_logs_error(self):
        # Mismatched row counts on a normal-to-normal transfer log an ERROR.
        options = self.setup_normal_to_normal_validation()
        additional = {
            "SELECT count(*) FROM": [3]
        }
        self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
        self.subject.GpTransfer(Mock(**options), []).run()
        self.assertIn("Validation failed for %s", self.get_error_logging())

    def test__partition_to_partition_final_count_invalid_one_src_one_dest_table_logs_warning(self):
        # The same mismatch on a partition transfer only warns.
        options = self.setup_partition_validation()
        additional = {
            "SELECT count(*) FROM": [3]
        }
        self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
        self.subject.GpTransfer(Mock(**options), []).run()
        self.assertIn("Validation failed for %s", self.get_warnings())
    def test__partition_to_partition_when_invalid_final_counts_should_warn(self):
        # Two source partitions into one destination with unequal canned
        # counts (12 vs 10) -> warning, not failure.
        options = self.setup_partition_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write(
                "my_first_database.public.my_table_partition1, my_first_database.public.my_table_partition1\n"
                "my_first_database.public.my_table_partition2")
        additional = {
            cursor_keys["partition_tables"]: [["public", "my_table_partition1", ""],
                                              ["public", "my_table_partition2", ""]],
        }
        cursor_side_effect = CursorSideEffect(additional=additional)
        self.cursor.side_effect = cursor_side_effect.cursor_side_effect
        multi = {
            "SELECT count(*) FROM": [[12], [10]]
        }
        self.db_singleton.side_effect = SingletonSideEffect(additional_col_list=multi).singleton_side_effect
        self.subject.GpTransfer(Mock(**options), []).run()
        self.assertIn("Validation failed for %s", self.get_warnings())

    def test__partition_to_partition_when_valid_final_counts_mult_src_same_dest_table_succeeds(self):
        # Same mapping with default (matching) counts validates cleanly.
        options = self.setup_partition_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write(
                "my_first_database.public.my_table_partition1, my_first_database.public.my_table_partition1\n"
                "my_first_database.public.my_table_partition2")
        additional = {
            cursor_keys["partition_tables"]: [["public", "my_table_partition1", ""],
                                              ["public", "my_table_partition2", ""]],
        }
        cursor_side_effect = CursorSideEffect(additional=additional)
        self.cursor.side_effect = cursor_side_effect.cursor_side_effect
        self.subject.GpTransfer(Mock(**options), []).run()
        self.assertIn("Validation of %s successful", self.get_info_messages())

    def test__partition_to_normal_table_succeeds(self):
        options = self.setup_partition_to_normal_validation()
        # simulate that dest normal table has 0 rows to begin with and 20 when finished
        multi = {
            "SELECT count(*) FROM": [[20], [0]]
        }
        self.db_singleton.side_effect = SingletonSideEffect(additional_col_list=multi).singleton_side_effect
        self.subject.GpTransfer(Mock(**options), []).run()
        self.assertNotIn("Validation failed for %s", self.get_warnings())
        self.assertIn("Validation of %s successful", self.get_info_messages())

    def test__final_count_validation_same_counts_src_dest_passes(self):
        options = self.setup_normal_to_normal_validation()
        self.subject.GpTransfer(Mock(**options), []).run()
        self.assertIn("Validation of %s successful", self.get_info_messages())
    def test__validates_good_partition(self):
        options = self.setup_partition_validation()
        self.subject.GpTransfer(Mock(**options), [])
        self.assertIn("Validating partition table transfer set...", self.get_info_messages())

    def test__partition_to_nonexistent_partition_fails(self):
        # Destination leaf partition missing -> constructor raises.  The
        # regex dots stand in for parentheses in the real message.
        options = self.setup_partition_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write(
                "my_first_database.public.my_table_partition1, my_first_database.public.my_table_partition2")
        self.cursor.side_effect = CursorSideEffect().cursor_side_effect
        with self.assertRaisesRegexp(Exception, "does not exist in destination database when transferring from "
                                                "partition tables .filtering for destination leaf partitions because "
                                                "of option \"--partition-transfer\"."):
            self.subject.GpTransfer(Mock(**options), [])

    def test__partition_to_nonexistent_normal_table_fails(self):
        # Same as above, but for the non-partition-target variant.
        options = self.setup_partition_to_normal_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write("my_first_database.public.my_table_partition1, my_first_database.public.does_not_exist")
        self.cursor.side_effect = CursorSideEffect().cursor_side_effect
        with self.assertRaisesRegexp(Exception, "does not exist in destination database when transferring from "
                                                "partition tables .filtering for destination non-partition tables "
                                                "because of option \"--partition-transfer-non-partition-target\"."):
            self.subject.GpTransfer(Mock(**options), [])

    def test__partition_to_multiple_same_partition_tables_fails(self):
        # Two different sources may not map onto the same destination
        # partition table.
        options = self.setup_partition_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write(
                "my_first_database.public.my_table_partition1\n"
                "my_first_database.public.my_table_partition3, "
                "my_first_database.public.my_table_partition1")
        cursor_side_effect = CursorSideEffect()
        cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""],
                                                                            ["public", "my_table_partition3", ""]]
        self.cursor.side_effect = cursor_side_effect.cursor_side_effect
        with self.assertRaisesRegexp(Exception, "Multiple tables map to"):
            self.subject.GpTransfer(Mock(**options), [])
    def test__partition_to_nonpartition_table_with_different_columns_fails(self):
        # Destination normal table whose column metadata differs from the
        # source partition is rejected.
        options = self.setup_partition_to_normal_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write("my_first_database.public.my_table_partition1, my_first_database.public.my_normal_table")
        additional = {
            cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
            cursor_keys['table_info']: [
                [1, "t", "my_new_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
        }
        cursor_side_effect = CursorSideEffect(additional=additional)
        cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""]]
        self.cursor.side_effect = cursor_side_effect.cursor_side_effect
        with self.assertRaisesRegexp(Exception, "has different column layout or types"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__multiple_partitions_to_same_normal_table_succeeds(self):
        # Two sibling partitions may share one normal destination table; the
        # nested side-effect class returns dest counts 30 then 15+15.
        options = self.setup_partition_to_normal_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write(
                "my_first_database.public.my_table_partition1, my_first_database.public.my_normal_table\n"
                "my_first_database.public.my_table_partition2, my_first_database.public.my_normal_table")
        additional = {
            cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
        }
        cursor_side_effect = CursorSideEffect(additional=additional)
        cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""],
                                                                            ["public", "my_table_partition2", ""]]
        self.cursor.side_effect = cursor_side_effect.cursor_side_effect

        class SingletonSideEffectWithIterativeReturns(SingletonSideEffect):
            # Extends the canned-answer helper so repeated count(*) queries
            # against the shared destination cycle through several values.
            def __init__(self):
                SingletonSideEffect.__init__(self)
                self.values['SELECT count(*) FROM "public"."my_normal_table"'] = [[[30], [15], [15]]]
                self.counters['SELECT count(*) FROM "public"."my_normal_table"'] = 0

            def singleton_side_effect(self, *args):
                for key in self.values.keys():
                    for arg in args:
                        if key in arg:
                            value_list = self.values[key]
                            result = value_list[self.counters[key] % len(value_list)]
                            # NOTE(review): indexes by len(value_list), not
                            # len(result) — presumably intentional for this
                            # fixture's shapes; confirm before reusing.
                            if any(isinstance(item, list) for item in value_list):
                                result = result[self.counters[key] % len(value_list)]
                            self.counters[key] += 1
                            return result
                return None

        self.db_singleton.side_effect = SingletonSideEffectWithIterativeReturns().singleton_side_effect
        self.subject.GpTransfer(Mock(**options), []).run()
        self.assertNotIn("Validation failed for %s", self.get_warnings())
        self.assertIn("Validation of %s successful", self.get_info_messages())
    def test__validate_nonpartition_tables_with_truncate_fails(self):
        # --truncate is incompatible with the non-partition-target mode.
        options = self.setup_partition_to_normal_validation()
        options.update(truncate=True)
        with self.assertRaisesRegexp(Exception, "--truncate is not allowed with option "
                                                "--partition-transfer-non-partition-target"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_bad_partition_source_not_leaf_fails(self):
        # Source with child relations is not a leaf partition -> rejected.
        options = self.setup_partition_validation()
        cursor_side_effect = CursorSideEffect()
        cursor_side_effect.first_values[cursor_keys['relations']] = ["my_relname1", "my_relname2"]
        self.cursor.side_effect = cursor_side_effect.cursor_side_effect
        with self.assertRaisesRegexp(Exception, "Source table "):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_partition_when_source_and_dest_have_different_column_count_fails(self):
        # Two table_info rows vs one -> differing column counts.
        options = self.setup_partition_validation()
        additional = {
            cursor_keys['table_info']: [
                ["t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"],
                ["t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
        }
        self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
        with self.assertRaisesRegexp(Exception, "has different column layout or types"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_bad_partition_different_column_type_fails(self):
        options = self.setup_partition_validation()
        additional = {
            cursor_keys['table_info']: [
                ["t", "my_new_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
        }
        self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
        with self.assertRaisesRegexp(Exception, "has different column layout or types"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_bad_partition_different_max_levels_fails(self):
        # Destination partition hierarchy deeper than source's.
        options = self.setup_partition_validation()
        additional = {
            "select max(p1.partitionlevel)": [2],
        }
        self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_error_logging()
        self.assertIn("Max level of partition is not same between", log_messages[0])
    def test__validate_bad_partition_different_values_of_attributes_fails(self):
        # Mismatched paratts ("3 4") -> column-attribute error first, then
        # the summary type/key message.
        options = self.setup_partition_validation()
        additional = {
            cursor_keys['partition_info']: [["my_parkind", 1, 1, "3 4"]],
        }
        self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_error_logging()
        self.assertIn("Partition type or key is different between", log_messages[1])
        self.assertIn("Partition column attributes are different at level", log_messages[0])

    def test__validate_partition_transfer_when_different_partition_attributes_fails(self):
        # parnatts mismatch (2 columns vs expected) at a level.
        options = self.setup_partition_validation()
        additional = {
            cursor_keys['partition_info']: [["my_parkind", 1, 2, "3 4"]],
        }
        self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_error_logging()
        self.assertIn("Partition type or key is different between", log_messages[1])
        self.assertIn("Number of partition columns is different at level", log_messages[0])

    def test__validate_bad_partition_different_parent_kind_fails(self):
        # parkind mismatch -> partition type error.
        options = self.setup_partition_validation()
        additional = {
            cursor_keys['partition_info']: [["different_parkind", 1, "my_parnatts", "my_paratts"]],
        }
        self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_error_logging()
        self.assertIn("Partition type or key is different between", log_messages[1])
        self.assertIn("Partition type is different at level", log_messages[0])

    def test__validate_bad_partition_different_number_of_attributes_fails(self):
        options = self.setup_partition_validation()
        additional = {
            cursor_keys['partition_info']: [["my_parkind", 1, 2, "my_paratts"]],
        }
        self.cursor.side_effect = CursorSideEffect(additional).cursor_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_error_logging()
        self.assertIn("Partition type or key is different between", log_messages[1])
        self.assertIn("Number of partition columns is different at level ", log_messages[0])
    def test__validate_bad_partition_different_partition_values_fails(self):
        # A default subpartition plus mismatched hierarchy values.
        options = self.setup_partition_validation()
        additional = {
            "select n.nspname, c.relname": [["not_public", "not_my_table", ""], ["public", "my_table_partition1", ""]],
            "select parisdefault, parruleord, parrangestartincl,": ["t", "1", "t", "t", 100, 10, "", ""],
        }
        self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_error_logging()
        self.assertIn("One of the subpartition table is a default partition", log_messages[0])
        self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])

    def test__validate_bad_partition_unknown_type_fails(self):
        # A partitioning type other than list/range is rejected outright.
        options = self.setup_partition_validation()
        my_singleton = SingletonSideEffect()
        my_singleton.values["select partitiontype"] = ["unknown"]
        self.db_singleton.side_effect = my_singleton.singleton_side_effect
        with self.assertRaisesRegexp(Exception, "Unknown partitioning type "):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_bad_partition_different_list_values_fails(self):
        # List partitions whose listvalues differ.
        options = self.setup_partition_validation()
        additional = {
            "select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, "", "different"],
        }
        my_singleton = SingletonSideEffect(additional)
        my_singleton.values["select partitiontype"] = [["list"]]
        self.db_singleton.side_effect = my_singleton.singleton_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        log_messages = self.get_error_logging()
        self.assertIn("List partition value is different between", log_messages[0])
        self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])

    def test__validate_bad_partition_different_range_values_fails(self):
        # Each row flips one range field (start-incl, end-incl, start value,
        # end value); every variant must fail the same way.
        self.run_range_partition_value(
            {"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "f", "t", 100, 10, "", "different"]})
        self.run_range_partition_value(
            {"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "f", 999, 10, "", "different"]})
        self.run_range_partition_value(
            {"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 999, "", "different"]})
        self.run_range_partition_value(
            {"select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, 999, "different"]})
    def test__validate_bad_partition_different_parent_partition_fails(self):
        # Third canned row diverges (999) so the two leaves resolve to
        # different parents at some level.
        options = self.setup_partition_validation()
        multi = {
            "select parisdefault, parruleord, parrangestartincl,": [["f", "1", "t", "t", 100, 10, "", ""],
                                                                    ["f", "1", "t", "t", 100, 10, "", ""],
                                                                    ["f", "1", "t", "t", 999, 10, "", ""]],
        }
        singleton_side_effect = SingletonSideEffect(additional_col_list=multi)
        self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
        with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
            self.subject.GpTransfer(Mock(**options), [])
        error_messages = self.get_error_logging()
        self.assertIn("Range partition value is different between source partition table", error_messages[0])
        self.assertIn("Partitions have different parents at level", error_messages[1])
    # The following family checks that --partition-transfer-non-partition-target
    # rejects every incompatible command-line option with a specific message.
    def test__validate_pt_non_pt_target_with_validator__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['validator'] = "MD5"
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot "
                                                "be used with --validate option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_partition_transfer__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['partition_transfer'] = True
        with self.assertRaisesRegexp(Exception, "--partition-transfer option cannot "
                                                "be used with --partition-transfer-non-partition-target option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_without_input_file__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['input_file'] = None
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option "
                                                "must be used with -f option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_databases__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['databases'] = ['db1']
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option "
                                                "cannot be used with -d option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_dest_databases__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['dest_database'] = ['db1']
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option "
                                                "cannot be used with --dest-database option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_drop__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['drop'] = True
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option "
                                                "cannot be used with --drop option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_tables__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['tables'] = ['public.table1']
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option "
                                                "cannot be used with -t option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_schema_only__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['schema_only'] = True
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option "
                                                "cannot be used with --schema-only option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_full__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['full'] = True
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option "
                                                "cannot be used with --full option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_exclude_input_file__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['exclude_input_file'] = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot "
                                                "be used with any exclude table option"):
            self.subject.GpTransfer(Mock(**options), [])

    def test__validate_pt_non_pt_target_with_exclude_tables__fails(self):
        options = self.setup_partition_to_normal_validation()
        options['exclude_tables'] = ['public.table1']
        with self.assertRaisesRegexp(Exception, "--partition-transfer-non-partition-target option cannot "
                                                "be used with any exclude table option"):
            self.subject.GpTransfer(Mock(**options), [])
    def test__partition_to_normal_multiple_same_dest_must_come_from_same_source_partition(self):
        # Two source partitions sharing one normal destination must also
        # share a parent; here the canned parent lookup returns two different
        # parents, so run() must raise.
        options = self.setup_partition_to_normal_validation()
        with open(options["input_file"], "w") as src_map_file:
            src_map_file.write(
                "my_first_database.public.my_table_partition1, my_first_database.public.my_normal_table\n"
                "my_first_database.public.my_table_partition2, my_first_database.public.my_normal_table")
        additional = {
            cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
        }
        cursor_side_effect = CursorSideEffect(additional=additional)
        cursor_side_effect.first_values[cursor_keys["partition_tables"]] = [["public", "my_table_partition1", ""],
                                                                            ["public", "my_table_partition2", ""]]
        self.cursor.side_effect = cursor_side_effect.cursor_side_effect

        class SingletonSideEffectWithIterativeReturns(SingletonSideEffect):
            # Variant of the canned-answer helper: cycles count(*) results
            # for the shared destination table.
            def __init__(self, multi_value=None):
                SingletonSideEffect.__init__(self, additional_col_list=multi_value)
                self.values["SELECT count(*) FROM public.my_normal_table"] = [[[30, 15, 15]]]
                self.counters["SELECT count(*) FROM public.my_normal_table"] = 0

            def singleton_side_effect(self, *args):
                for key in self.values.keys():
                    for arg in args:
                        if key in arg:
                            value_list = self.values[key]
                            result = value_list[self.counters[key] % len(value_list)]
                            # NOTE(review): indexes by len(value_list), not
                            # len(result) — presumably intentional for this
                            # fixture's shapes; confirm before reusing.
                            if any(isinstance(i, list) for i in value_list):
                                result = result[self.counters[key] % len(value_list)]
                            self.counters[key] += 1
                            return result
                return None

        multi_value = {
            "select n.nspname, c.relname": [["public", "my_table_partition1"], ["public", "other_parent"]]
        }
        self.db_singleton.side_effect = SingletonSideEffectWithIterativeReturns(multi_value=multi_value).singleton_side_effect
        with self.assertRaisesRegexp(Exception, "partition sources: public.my_table_partition1, "
                                                "public.my_table_partition2, when transferred to "
                                                "the same destination: table public.my_normal_table , "
                                                "must share the same parent"):
            self.subject.GpTransfer(Mock(**options), []).run()
def test__validating_transfer_with_empty_source_map_file_raises_proper_exception(self):
    """An empty --source-map-file must fail with "No hosts in map"."""
    options = self.setup_partition_to_normal_validation()
    source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
    source_map_filename.write("")
    source_map_filename.flush()
    options.update(
        source_map_file=source_map_filename.name
    )
    with self.assertRaisesRegexp(Exception, "No hosts in map"):
        self.subject.GpTransfer(Mock(**options), [])
def test__row_count_validation_escapes_schema_and_table_names(self):
    """CountTableValidator must double-quote mixed-case schema/table names in its SQL."""
    escaped_query = 'SELECT count(*) FROM "escapedSchema"."escapedTable"'
    # NOTE(review): the spec list names the *values* rather than the attribute
    # names ('schema'/'table') — verify this is intentional for the mock library
    # version in use.
    table_mock = Mock(spec=['escapedSchema','escapedTable'])
    table_mock.schema = 'escapedSchema'
    table_mock.table = 'escapedTable'
    table_pair = Mock(spec=['source','dest'])
    table_pair.source = table_mock
    table_pair.dest = table_mock
    validator = self.subject.CountTableValidator('some_work_dir', table_pair, 'fake_db_connection',
                                                 'fake_db_connection')
    # Both the source and destination count queries must use the quoted form.
    self.assertEqual(escaped_query, validator._src_sql)
    self.assertEqual(escaped_query, validator._dest_sql)
def test__validate_good_range_partition_from_4_to_X(self):
    """Range-partition criteria must match across a GPDB 4.3 -> 5.0 transfer.

    The 4.3 CONST node text lacks the :consttypmod field that 5.0 emits; the
    comparison must treat the two spellings as equivalent.
    """
    options = self.setup_partition_validation()
    singleton_side_effect = SingletonSideEffect()
    # Source reports 4.3, destination reports 5.0.
    singleton_side_effect.replace("SELECT version()", [
        ["PostgreSQL 8.2.15 (Greenplum Database 4.3.11.3-rc1-2-g3725a31 build 1) on x86_64-unknown-linux-gnu, "
         "compiled by GCC gcc (GCC) 4.4.2 compiled on Jan 24 2017 14:17:39 (with assert checking)"],
        ["PostgreSQL 8.3.23 (Greenplum Database 5.0.0 build fdafasdf"],
    ])
    singleton_side_effect.replace("select parisdefault, parruleord, parrangestartincl,", [
        ["f", "1", "t", "t",
         "({CONST :consttype 1082 :constlen 4 :constbyval true :constisnull false :constvalue 4 []})",
         "({CONST :consttype 1082 :constlen 4 :constbyval true :constisnull false :constvalue 4 []})",
         "", ""],
        ["f", "1", "t", "t",
         "({CONST :consttype 1082 :consttypmod -1 :constlen 4 :constbyval true :constisnull false "
         ":constvalue 4 []})",
         "({CONST :consttype 1082 :consttypmod -1 :constlen 4 :constbyval true :constisnull false "
         ":constvalue 4 []})",
         "", ""],
    ])
    self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
    self.subject.GpTransfer(Mock(**options), [])
    self.assertIn("Validating partition table transfer set...", self.get_info_messages())
def test__validate_good_list_partition_from_4_to_X(self):
    """List-partition criteria must match across a GPDB 4.3 -> 5.0 transfer."""
    options = self.setup_partition_validation()
    singleton_side_effect = SingletonSideEffect()
    singleton_side_effect.replace("SELECT version()", [
        ["PostgreSQL 8.2.15 (Greenplum Database 4.3.11.3-rc1-2-g3725a31 build 1) on x86_64-unknown-linux-gnu, "
         "compiled by GCC gcc (GCC) 4.4.2 compiled on Jan 24 2017 14:17:39 (with assert checking)"],
        ["PostgreSQL 8.3.23 (Greenplum Database 5.0.0 build fdafasdf"],
    ])
    # 4.3 emits no :consttypmod; 5.0 does.  Both describe the same list value.
    singleton_side_effect.replace("select parisdefault, parruleord, parrangestartincl,", [
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 25 :constlen -1 :constbyval false :constisnull false :constvalue 8 "
         "[ 0 0 0 8 97 115 105 97 ]}))"],
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 25 :consttypmod -1 :constlen -1 :constbyval false :constisnull false :constvalue 8 "
         "[ 0 0 0 8 97 115 105 97 ]}))"],
    ])
    singleton_side_effect.replace("select partitiontype", [["list"]])
    self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
    self.subject.GpTransfer(Mock(**options), [])
    self.assertIn("Validating partition table transfer set...", self.get_info_messages())
def test__validate_good_multi_column_list_partition_from_4_to_X(self):
    """Multi-column list-partition criteria must match across a 4.3 -> 5.0 transfer."""
    options = self.setup_partition_validation()
    singleton_side_effect = SingletonSideEffect()
    singleton_side_effect.replace("SELECT version()", [
        ["PostgreSQL 8.2.15 (Greenplum Database 4.3.11.3-rc1-2-g3725a31 build 1) on x86_64-unknown-linux-gnu, "
         "compiled by GCC gcc (GCC) 4.4.2 compiled on Jan 24 2017 14:17:39 (with assert checking)"],
        ["PostgreSQL 8.3.23 (Greenplum Database 5.0.0 build fdafasdf"],
    ])
    # Two CONST nodes per row (one per partition key column).
    singleton_side_effect.replace("select parisdefault, parruleord, parrangestartincl,", [
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 1042 :constlen -1 :constbyval false :constisnull false :constvalue 5 [ 0 0 0 5 77 ]} "
         "{CONST :consttype 23 :constlen 4 :constbyval true :constisnull false :constvalue 4 [ 1 0 0 0 0 0 0 0 ]}))"],
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 1042 :consttypmod 5 :constlen -1 :constbyval false :constisnull false :constvalue 5 "
         "[ 0 0 0 5 77 ]} {CONST :consttype 23 :consttypmod -1 :constlen 4 :constbyval true :constisnull false "
         ":constvalue 4 [ 1 0 0 0 0 0 0 0 ]}))"],
    ])
    singleton_side_effect.replace("select partitiontype", [["list"]])
    self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
    self.subject.GpTransfer(Mock(**options), [])
    self.assertIn("Validating partition table transfer set...", self.get_info_messages())
def test__validate_good_multi_column_swapped_column_ordering_list_partition_with_same_version(self):
    """Swapped CONST ordering between two 5.0 clusters must still validate.

    The two sides list the same two partition-key CONST values in opposite
    order; validation must treat them as equivalent.
    """
    options = self.setup_partition_validation()
    singleton_side_effect = SingletonSideEffect()
    singleton_side_effect.replace("SELECT version()", [
        ["PostgreSQL 8.3.23 (Greenplum Database 5.0.0 build fdafasdf"],
        ["PostgreSQL 8.3.23 (Greenplum Database 5.0.0 build fdafasdf"],
    ])
    singleton_side_effect.replace("select parisdefault, parruleord, parrangestartincl,", [
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 1042 :consttypmod 5 :constlen -1 :constbyval false :constisnull false :constvalue 5 "
         "[ 0 0 0 5 77 ]} "
         "{CONST :consttype 23 :consttypmod -1 :constlen 4 :constbyval true :constisnull false :constvalue 4 "
         "[ 1 0 0 0 0 0 0 0 ]}))"],
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 23 :consttypmod -1 :constlen 4 :constbyval true :constisnull false :constvalue 4 "
         "[ 1 0 0 0 0 0 0 0 ]} "
         "{CONST :consttype 1042 :consttypmod 5 :constlen -1 :constbyval false :constisnull false :constvalue 5 "
         "[ 0 0 0 5 77 ]}))"],
    ])
    singleton_side_effect.replace("select partitiontype", [["list"]])
    self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
    self.subject.GpTransfer(Mock(**options), [])
    # Consistency fix: every sibling "validate_good_*" test asserts the info
    # message; this one previously constructed GpTransfer without checking any
    # outcome, so a silent validation failure would not have been caught.
    self.assertIn("Validating partition table transfer set...", self.get_info_messages())
def test__validate_good_multi_column_swapped_column_ordering_list_partition_from_4_to_X(self):
    """Swapped CONST ordering plus 4.3/5.0 typmod differences must still validate."""
    options = self.setup_partition_validation()
    singleton_side_effect = SingletonSideEffect()
    singleton_side_effect.replace("SELECT version()", [
        ["PostgreSQL 8.2.15 (Greenplum Database 4.3.11.3-rc1-2-g3725a31 build 1) on x86_64-unknown-linux-gnu, "
         "compiled by GCC gcc (GCC) 4.4.2 compiled on Jan 24 2017 14:17:39 (with assert checking)"],
        ["PostgreSQL 8.3.23 (Greenplum Database 5.0.0 build fdafasdf"],
    ])
    # Same two CONST values; the 5.0 side lists them in the opposite order and
    # includes :consttypmod fields the 4.3 side lacks.
    singleton_side_effect.replace("select parisdefault, parruleord, parrangestartincl,", [
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 1042 :constlen -1 :constbyval false :constisnull false :constvalue 5 [ 0 0 0 5 77 ]} "
         "{CONST :consttype 23 :constlen 4 :constbyval true :constisnull false :constvalue 4 [ 1 0 0 0 0 0 0 0 ]}))"],
        ["f", "1", "t", "t", "", "", "",
         "(({CONST :consttype 23 :consttypmod -1 :constlen 4 :constbyval true :constisnull false :constvalue 4 "
         "[ 1 0 0 0 0 0 0 0 ]} "
         "{CONST :consttype 1042 :consttypmod 5 :constlen -1 :constbyval false :constisnull false :constvalue 5 "
         "[ 0 0 0 5 77 ]}))"],
    ])
    singleton_side_effect.replace("select partitiontype", [["list"]])
    self.db_singleton.side_effect = singleton_side_effect.singleton_side_effect
    self.subject.GpTransfer(Mock(**options), [])
    self.assertIn("Validating partition table transfer set...", self.get_info_messages())
def test__validate_max_line_length_below_minimum(self):
    """A --max-line-length below the 32KB gpfdist minimum must be rejected."""
    options = self.setup_partition_to_normal_validation()
    options.update(max_line_length=1024*16)  # 16KB, below the minimum
    MIN_GPFDIST_MAX_LINE_LENGTH = 1024 * 32  # (32KB)
    MAX_GPFDIST_MAX_LINE_LENGTH = 1024 * 1024 * 256  # (256MB)
    with self.assertRaisesRegexp(Exception, "Invalid --max-line-length option. Value must be between %d and %d" % (MIN_GPFDIST_MAX_LINE_LENGTH,
                                                                                                                   MAX_GPFDIST_MAX_LINE_LENGTH)):
        self.subject.GpTransfer(Mock(**options), [])
def test__validate_max_line_length_above_maximum(self):
    """A --max-line-length above the 256MB gpfdist maximum must be rejected."""
    options = self.setup_partition_to_normal_validation()
    options.update(max_line_length=1024*1024*512)  # 512MB, above the maximum
    MIN_GPFDIST_MAX_LINE_LENGTH = 1024 * 32  # (32KB)
    MAX_GPFDIST_MAX_LINE_LENGTH = 1024 * 1024 * 256  # (256MB)
    with self.assertRaisesRegexp(Exception, "Invalid --max-line-length option. Value must be between %d and %d" % (MIN_GPFDIST_MAX_LINE_LENGTH,
                                                                                                                   MAX_GPFDIST_MAX_LINE_LENGTH)):
        self.subject.GpTransfer(Mock(**options), [])
def test__validate_max_line_length_valid(self):
    """A --max-line-length within the gpfdist range (32KB..256MB) must construct cleanly."""
    options = self.setup_partition_to_normal_validation()
    options.update(max_line_length=1024*1024)  # 1MB, well inside the range
    # Cleanup: the MIN/MAX constants copied from the out-of-range tests were
    # unused here and have been removed.
    self.subject.GpTransfer(Mock(**options), [])
####################################################################################################################
# End of tests, start of private methods/objects
####################################################################################################################
def get_error_logging(self):
    """Return the first positional argument of every logger.error() call."""
    error_calls = self.subject.logger.error.call_args_list
    return [call[0][0] for call in error_calls]
def get_info_messages(self):
    """Return the first positional argument of every logger.info() call."""
    info_calls = self.subject.logger.info.call_args_list
    return [call[0][0] for call in info_calls]
def get_warnings(self):
    """Gather messages logged through both logger.warning() and logger.warn().

    Messages recorded via .warning come first, followed by those via .warn.
    """
    collected = []
    for mocked_log in (self.subject.logger.warning, self.subject.logger.warn):
        collected.extend(call[0][0] for call in mocked_log.call_args_list)
    return collected
def _get_gptransfer_command(self):
    """Build a GpTransferCommand for a public.foo -> public.foo table pair.

    Uses the shared GpTransferCommand_args fixture, substituting in a
    source/destination pair built from identical (db, schema, table) tuples.
    """
    gptransfer = self.subject
    cmd_args = self.GpTransferCommand_args
    src_args = ('src', 'public', 'foo', False)
    dest_args = ('dest', 'public', 'foo', False)
    source_table = gptransfer.GpTransferTable(*src_args)
    dest_table = gptransfer.GpTransferTable(*dest_args)
    cmd_args['table_pair'] = gptransfer.GpTransferTablePair(source_table, dest_table)
    return gptransfer.GpTransferCommand(**cmd_args)
def run_range_partition_value(self, additional):
    """Run a partition validation expected to fail on differing range values.

    `additional` overrides singleton query results so that source and
    destination report mismatched partition criteria; asserts both the raised
    exception and the two logged error messages.
    """
    options = self.setup_partition_validation()
    self.db_singleton.side_effect = SingletonSideEffect(additional).singleton_side_effect
    with self.assertRaisesRegexp(Exception, "has different partition criteria from destination table"):
        self.subject.GpTransfer(Mock(**options), [])
    log_messages = self.get_error_logging()
    self.assertIn("Range partition value is different between", log_messages[0])
    self.assertIn("Partition value is different in the partition hierarchy between", log_messages[1])
def createGpArrayWith2Primary2Mirrors(self):
    """Build a five-segment GpArray: one master, two primaries, two mirrors."""
    segment_specs = [
        "1|-1|p|p|s|u|mdw|mdw|5432|/data/master",
        "2|0|p|p|s|u|sdw1|sdw1|40000|/data/primary0",
        "3|1|p|p|s|u|sdw2|sdw2|40001|/data/primary1",
        "4|0|m|m|s|u|sdw2|sdw2|50000|/data/mirror0",
        "5|1|m|m|s|u|sdw1|sdw1|50001|/data/mirror1",
    ]
    return GpArray([Segment.initFromString(spec) for spec in segment_specs])
def setup_partition_validation(self):
    """Prepare options and mocks for a partition-to-partition transfer test.

    Writes a host map and an input file to temp files, installs default
    cursor/singleton side effects, and returns the option dict.
    """
    source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
    source_map_filename.write("sdw1,12700\nsdw2,12700")
    # Bug fix: flush so the content is visible when GpTransfer reads the file
    # by name (setup_normal_to_normal_validation already did this).
    source_map_filename.flush()
    input_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
    input_filename.write("my_first_database.public.my_table_partition1")
    input_filename.flush()
    self.cursor.side_effect = CursorSideEffect().cursor_side_effect
    self.db_singleton.side_effect = SingletonSideEffect().singleton_side_effect
    options = {}
    options.update(self.GpTransfer_options_defaults)
    options.update(
        partition_transfer=True,
        input_file=input_filename.name,
        source_map_file=source_map_filename.name,
        base_port=15432,
        max_line_length=32768,
        work_base_dir="/tmp",
        source_port=45432,
        dest_port=15432,
    )
    return options
def setup_partition_to_normal_validation(self):
    """Prepare options and mocks for a partition-to-normal-table transfer test.

    The input file maps one source partition to a normal destination table;
    cursor results are extended with extra relations for lookup queries.
    """
    source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
    source_map_filename.write("sdw1,12700\nsdw2,12700")
    # Bug fix: flush so the content is visible when GpTransfer reads the file
    # by name (setup_normal_to_normal_validation already did this).
    source_map_filename.flush()
    input_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
    input_filename.write("my_first_database.public.my_table_partition1, "
                         "my_second_database.public.my_normal_table")
    input_filename.flush()
    additional = {
        cursor_keys['relations']: ["my_relname", "another_rel"],
    }
    self.cursor.side_effect = CursorSideEffect(additional=additional).cursor_side_effect
    self.db_singleton.side_effect = SingletonSideEffect().singleton_side_effect
    options = {}
    options.update(self.GpTransfer_options_defaults)
    options.update(
        partition_transfer_non_pt_target=True,
        input_file=input_filename.name,
        source_map_file=source_map_filename.name,
        base_port=15432,
        max_line_length=32768,
        work_base_dir="/tmp",
        source_port=45432,
        dest_port=15432,
    )
    return options
def setup_normal_to_normal_validation(self):
    """Prepare options and mocks for a normal-to-normal table transfer test."""
    source_map_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
    source_map_filename.write("sdw1,12700\nsdw2,12700")
    source_map_filename.flush()
    input_filename = tempfile.NamedTemporaryFile(dir=self.TEMP_DIR, delete=False)
    input_filename.write("my_first_database.public.my_normal_table")
    input_filename.flush()
    additional = {
        cursor_keys["normal_tables"]: [["public", "my_normal1_table", ""]],
    }
    cursor_side_effect = CursorSideEffect(additional=additional)
    # Bug fix: second_values is keyed by the compiled regex in cursor_keys, not
    # by the plain string "normal_tables" — the string key was never consulted
    # by cursor_side_effect, so the empty-result override was dead.
    cursor_side_effect.second_values[cursor_keys["normal_tables"]] = [[]]
    self.cursor.side_effect = cursor_side_effect.cursor_side_effect
    self.db_singleton.side_effect = SingletonSideEffect().singleton_side_effect
    options = {}
    options.update(self.GpTransfer_options_defaults)
    options.update(
        input_file=input_filename.name,
        source_map_file=source_map_filename.name,
        base_port=15432,
        max_line_length=32768,
        work_base_dir="/tmp",
        source_port=45432,
        dest_port=15432,
    )
    return options
class CursorSideEffect:
    """Fake dbconn.execSQL factory keyed by the regex patterns in cursor_keys.

    The first time a pattern matches a query, the rows from `first_values` are
    served; every later match serves `second_values`, which starts as a copy of
    `first_values` and may be overridden via the `additional` constructor
    argument.
    """

    def __init__(self, additional=None):
        self.first_values = {
            cursor_keys["normal_tables"]: [["public", "my_normal_table", ""]],
            cursor_keys["partition_tables"]: [["public", "my_table_partition1", ""]],
            cursor_keys['relations']: ["my_relname"],
            cursor_keys['table_info']: [
                ["t", "my_data_type", 255, 16, 1024, 1024, 1, 1024, "my_interval_type", "my_udt_name"]],
            cursor_keys['partition_info']: [["my_parkind", 1, 1, "1"]],
            cursor_keys['schema_name']: ["public"],
            cursor_keys['create_schema']: ["my_schema"],
            cursor_keys['ordinal_pos']: [[1]],
        }
        self.counters = dict((pattern, 0) for pattern in self.first_values)
        self.second_values = self.first_values.copy()
        if additional:
            self.second_values.update(additional)

    def cursor_side_effect(self, *args):
        """Return a FakeCursor for the first SQL argument matching a known pattern."""
        for pattern in self.first_values:
            # args[0] is the connection; the SQL text starts at args[1].
            for sql in args[1:]:
                flattened = sql.replace("\n", " ")
                if pattern.search(flattened):
                    source = self.second_values if self.has_called(pattern) else self.first_values
                    return FakeCursor(source[pattern])
        return None

    def has_called(self, key):
        """Bump the per-pattern call counter; True once the pattern was seen before."""
        self.counters[key] += 1
        return self.counters[key] > 1

    def append_regexp_key(self, key, value):
        """Register an extra pattern serving `value` on every call."""
        self.first_values[key] = value
        self.second_values[key] = value
        self.counters[key] = 0
class SingletonSideEffect:
    """
    Mocks out the results of execSQLForSingletonRow.
    Any values which are provided as lists, using the "replace()" verb,
    will be returned as a "side-effect" in the order provided.
    """
    def __init__(self, additional_column=None, additional_col_list=None):
        # Default single row returned for each recognized query fragment.
        self.values = {
            "select partitiontype": ["range"],
            "select max(p1.partitionlevel)": [1],
            "select schemaname, tablename from pg_catalog.pg_partitions": ["public", "my_table_partition1"],
            "select c.oid": ["oid1", "oid1"],
            "select parisdefault, parruleord, parrangestartincl,": ["f", "1", "t", "t", 100, 10, "", ""],
            "select n.nspname, c.relname": ["public", "my_table_partition1"],
            "SELECT count(*) FROM": [20],
            "SELECT version()": ["PostgreSQL 8.3.23 (Greenplum Database 5.0.0 build fdafasdf"],
        }
        self.counters = dict((key, 0) for key in self.values.keys())
        # Wrap each row in a list so every key maps to a *sequence of rows*
        # cycled through on successive calls.  Portability fix: dict.items()
        # instead of the Python-2-only dict.iteritems() — identical behavior
        # on Python 2, and the class now also runs on Python 3.
        self.values = dict((key, [values]) for (key, values) in self.values.items())

        # allow additional column value(s) to be added via parameters
        for key in self.values.keys():
            if additional_column:
                if key in additional_column:
                    values = self.values[key]
                    values.append(additional_column[key])
            if additional_col_list:
                if key in additional_col_list:
                    values = self.values[key]
                    values.extend(additional_col_list[key])

    def singleton_side_effect(self, *args):
        """Return the next row for the first known fragment found in any arg, else None."""
        for key in self.values.keys():
            for arg in args:
                if key in arg:
                    value_list = self.values[key]
                    result = value_list[self.counters[key] % len(value_list)]
                    self.counters[key] += 1
                    return result
        return None

    def replace(self, key, value):
        """Override the row sequence for `key` and restart its cycle."""
        self.counters[key] = 0
        self.values[key] = value
# Script entry point: run this module's test suite directly.
if __name__ == '__main__':
    run_tests()
| |
"""
Protocol module
Handles the protocol for the synchronisation model
Copyright (c) 2009 John Markus Bjoerndalen <jmb@cs.uit.no>,
Brian Vinter <vinter@nbi.dk>, Rune M. Friborg <rune.m.friborg@gmail.com>.
See LICENSE.txt for licensing details (MIT License).
"""
import sys
import threading
from pycsp.parallel.exceptions import *
from pycsp.parallel.header import *
from pycsp.parallel.dispatch import *
from pycsp.parallel.const import *
from pycsp.parallel.configuration import *
conf = Configuration()
class ChannelMessenger(object):
    """Client-side messenger that sends channel protocol commands to a
    channel's home thread through the process-local SocketDispatcher."""

    def __init__(self):
        # Lazily bound dispatch thread; (re)acquired by restore().
        self.dispatch = None

    def restore(self):
        """
        Restore dispatch thread, if the current thread is stale. This can happen when channelends are mobile and sent to other processes.
        """
        if self.dispatch:
            if not self.dispatch.is_alive():
                self.dispatch = SocketDispatcher().getThread()
        else:
            self.dispatch = SocketDispatcher().getThread()

    def register(self, channel):
        """
        Registers a channel reference at the channel home thread
        """
        self.restore()
        try:
            self.dispatch.send(channel.address,
                               Header(CHANTHREAD_REGISTER, channel.name))
        except SocketException:
            # Unable to register at channel home thread
            raise ChannelConnectException(channel.address, "PyCSP (register channel) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))

    def deregister(self, channel):
        """Remove a channel reference from the channel home thread."""
        self.restore()
        try:
            self.dispatch.send(channel.address,
                               Header(CHANTHREAD_DEREGISTER, channel.name))
        except SocketException:
            # Unable to deregister at channel home thread
            # The channel thread may have been terminated forcefully, thus this is an acceptable situation.
            pass

    def join(self, channel, direction):
        """Announce a new reader (READ) or writer (WRITE) on the channel."""
        self.restore()
        try:
            if direction == READ:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_JOIN_READER, channel.name))
            elif direction == WRITE:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_JOIN_WRITER, channel.name))
        except SocketException:
            # Unable to join channel
            raise ChannelLostException(channel.address, "PyCSP (join channel) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))

    def retire(self, channel, direction):
        """Retire one reader/writer end; tolerated failure unless strict mode."""
        self.restore()
        try:
            #print("CM RETIRE %s" % channel.name)
            if direction == READ:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_RETIRE_READER, channel.name))
            elif direction == WRITE:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_RETIRE_WRITER, channel.name))
        except SocketException:
            # Unable to retire from channel
            if conf.get(SOCKETS_STRICT_MODE):
                raise ChannelLostException(channel.address, "PyCSP (retire from channel) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))
            else:
                sys.stderr.write("PyCSP (retire from channel) unable to reach channel home thread (%s at %s)\n" % (channel.name, str(channel.address)))

    def poison(self, channel, direction):
        """Poison the reading or writing end of the channel."""
        self.restore()
        try:
            if direction == READ:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_POISON_READER, channel.name))
            elif direction == WRITE:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_POISON_WRITER, channel.name))
        except SocketException:
            # Unable to poison channel
            if conf.get(SOCKETS_STRICT_MODE):
                raise ChannelLostException(channel.address, "PyCSP (poison channel) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))
            else:
                sys.stderr.write("PyCSP (poison channel) unable to reach channel home thread (%s at %s)\n" % (channel.name, str(channel.address)))

    def post_read(self, channel, process, ack=False):
        """Post a read request for `process`; with ack=True the home thread
        confirms the request was matched against current offers (used by
        prioritized select)."""
        self.restore()

        # Enter channel and update NAT socket
        if not channel in process.activeChanList:
            process.activeChanList.append(channel)
            self.enter(channel, process)

        try:
            if ack:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_POST_ACK_READ, channel.name, process.sequence_number, _source_id=process.id))
            else:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_POST_READ, channel.name, process.sequence_number, _source_id=process.id))
        except SocketException:
            # Unable to post read request to channel home thread
            raise FatalException("PyCSP (post read request) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))

    def post_write(self, channel, process, msg, ack=False):
        """Post a write request carrying `msg` as payload; see post_read for ack."""
        self.restore()

        # Enter channel and update NAT socket
        if not channel in process.activeChanList:
            process.activeChanList.append(channel)
            self.enter(channel, process)

        try:
            if ack:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_POST_ACK_WRITE, channel.name, process.sequence_number, _source_id=process.id), payload=[msg])
            else:
                self.dispatch.send(channel.address,
                                   Header(CHANTHREAD_POST_WRITE, channel.name, process.sequence_number, _source_id=process.id), payload=[msg])
        except SocketException:
            # Unable to post read request to channel home thread
            raise FatalException("PyCSP (post write request) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))

    def enter(self, channel, process):
        """
        The enter command is also used to update the reverse socket for traversing NAT
        """
        self.restore()
        try:
            self.dispatch.send(channel.address,
                               Header(CHANTHREAD_ENTER, channel.name, _source_id=process.id))
            # The reverse socket is added to the message at the destination dispatch thread
        except SocketException:
            # Unable to enter channel
            raise ChannelLostException(channel.address, "PyCSP (enter channel) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))

    def leave(self, channel, process):
        """
        The leave command is used to remove and forcefully deny all communication to a process
        """
        self.restore()
        try:
            self.dispatch.send(channel.address,
                               Header(CHANTHREAD_LEAVE, channel.name, _source_id=process.id))
        except SocketException:
            # Unable to decrement writer count on channel
            if conf.get(SOCKETS_STRICT_MODE):
                raise ChannelLostException(channel.address, "PyCSP (leave channel) unable to reach channel home thread (%s at %s)" % (channel.name, str(channel.address)))
            else:
                sys.stderr.write("PyCSP (leave channel) unable to reach channel home thread (%s at %s)\n" % (channel.name, str(channel.address)))
class LockMessenger(object):
    """Channel-home-side messenger that acquires/releases the remote process
    locks and delivers notify/poison/retire replies to posted requests."""

    def __init__(self, channel_id):
        self.dispatch = SocketDispatcher().getThread()
        self.channel_id = channel_id
        # Queue receiving replies addressed to this channel.
        self.input = self.dispatch.getChannelQueue(channel_id)

    def set_reverse_socket(self, addr, reverse_socket):
        self.dispatch.add_reverse_socket(addr, reverse_socket)

    def ack(self, dest):
        """
        Send acknowledgement to process, that the posted request have been checked for
        valid offers.
        This ack is used to ensure prioritized selects.
        """
        try:
            h = Header(LOCKTHREAD_ACK, dest.id)
            h._source_id = self.channel_id
            self.dispatch.send(dest.hostNport, h)
        except SocketException:
            # Bug fix: the original passed str(dest.id) as a second argument to
            # FatalException instead of %-formatting it into the message.
            raise FatalException("Process %s is unavailable!" % str(dest.id))

    def remote_acquire_and_get_state(self, dest):
        """Acquire the remote lock of `dest` and return (header, state, seq).

        Returns (None, FAIL, 0) and disables the request if the process is
        inactive or unreachable.
        """
        #sys.stderr.write("\nENTER REMOTE ACQUIRE\n")
        if not dest.active:
            return (None, FAIL, 0)

        header = Header()
        try:
            #print("\n%s:SEND REMOTE ACQUIRE TO %s using %s" % (self.channel_id, dest.id, self.dispatch))
            h = Header(LOCKTHREAD_ACQUIRE_LOCK, dest.id)
            h._source_id = self.channel_id
            self.dispatch.send(dest.hostNport, h)
            msg = self.input.pop_reply()
            if msg == None:
                header.cmd = LOCKTHREAD_UNAVAILABLE
            else:
                header = msg.header
        except SocketException:
            #print "SocketException UNAVAILABLE!"
            header.cmd = LOCKTHREAD_UNAVAILABLE

        if header.cmd == LOCKTHREAD_UNAVAILABLE:
            # connection broken.
            # When a channel is unable to acquire the lock for process, the
            # posted request is disabled.
            dest.active = False
            #sys.stderr.write("\nEXIT REMOTE ACQUIRE FAIL\n")
            return (None, FAIL, 0)

        if header.cmd != LOCKTHREAD_ACCEPT_LOCK:
            raise Exception("Fatal error!")

        #sys.stderr.write("\nEXIT REMOTE ACQUIRE SUCCESS\n")
        return (header, header.arg, header.seq_number)

    def remote_notify(self, source_header, dest, result_ch, result_msg=b""):
        """Complete the matched request at `dest` with the result channel/message."""
        if dest.active:
            try:
                h = Header(LOCKTHREAD_NOTIFY_SUCCESS, dest.id)
                h._source_id = self.channel_id
                h._result_id = result_ch
                self.dispatch.reply(source_header, h, payload=result_msg)
            except SocketException:
                raise AddrUnavailableException(dest)

    def remote_poison(self, source_header, dest):
        """Propagate channel poison to the process holding the posted request."""
        if dest.active:
            try:
                h = Header(LOCKTHREAD_POISON, dest.id)
                h._source_id = self.channel_id
                self.dispatch.reply(source_header, h)
            except SocketException:
                raise AddrUnavailableException(dest)

    def remote_retire(self, source_header, dest):
        """Propagate channel retirement to the process holding the posted request."""
        if dest.active:
            try:
                h = Header(LOCKTHREAD_RETIRE, dest.id)
                h._source_id = self.channel_id
                self.dispatch.reply(source_header, h)
            except SocketException:
                raise AddrUnavailableException(dest)

    def remote_release(self, source_header, dest):
        """
        Ignore socket exceptions on remote_release
        """
        if dest.active:
            try:
                h = Header(LOCKTHREAD_RELEASE_LOCK, dest.id)
                h._source_id = self.channel_id
                self.dispatch.reply(source_header, h)
            except SocketException:
                pass

    def remote_final(self, dest):
        """
        Tell remote lock, that this is the last communication
        """
        if dest.active:
            try:
                h = Header(LOCKTHREAD_QUIT, dest.id)
                h._source_id = self.channel_id
                self.dispatch.send(dest.hostNport, h)
            except SocketException:
                pass
class RemoteLock(object):
    """Process-side lock protocol handler.

    Serializes lock acquisition by remote channel threads: at most one channel
    holds the lock (self.lock_acquired); other acquire requests queue in
    self.waiting until the holder releases.
    """

    def __init__(self, process):
        self.process = process
        self.cond = process.cond
        self.dispatch = SocketDispatcher().getThread()
        # Acquire requests queued while another channel holds the lock.
        self.waiting = []
        # _source_id of the channel currently holding the lock, or None.
        self.lock_acquired = None

    def __repr__(self):
        return repr("<pycsp.protocol.RemoteLock for process id:%s acquired:%s waiting:%s, fn:%s>" % (self.process.id, self.lock_acquired, str(self.waiting), self.process.fn))

    def handle(self, message):
        """Dispatch one incoming lock-protocol message for this process."""
        header = message.header

        # Check id
        if not (self.process.id == header.id):
            raise Exception("Fatal error!, wrong process ID!")

        if header.cmd == LOCKTHREAD_QUIT:
            # May be interleaved with any other messages, as it is only sent when the process
            # is ready to quit.
            self.cond.acquire()
            self.process.closedChanList.append(header._source_id)
            self.cond.notify()
            self.cond.release()

        elif header.cmd == LOCKTHREAD_ACK:
            # Send acknowledgement to process through the condition variable.
            # Used for prioritised select
            self.cond.acquire()
            if self.process.ack:
                raise Exception("PyCSP Panic")
            self.process.ack= True
            self.cond.notify()
            self.cond.release()

        elif header.cmd == LOCKTHREAD_ACQUIRE_LOCK:
            #print("\n%s:GOT REMOTE ACQUIRE FROM %s" % (self.process.id, header._source_id))
            if not self.lock_acquired == None:
                # Lock is held by another channel; queue this request.
                self.waiting.append(message)
            else:
                self.lock_acquired = header._source_id
                # Send reply
                self.dispatch.reply(header, Header(LOCKTHREAD_ACCEPT_LOCK, header._source_id, self.process.sequence_number, self.process.state))

        elif header.cmd == LOCKTHREAD_NOTIFY_SUCCESS:
            #print("%s NOTIFY\n" % (self.process.id))
            if self.lock_acquired == header._source_id:
                self.cond.acquire()
                if self.process.state != READY:
                    raise Exception("PyCSP Panic")
                self.process.result_ch = header._result_id
                # The unpickling must be postponed to the @process
                self.process.result_msg = message.payload
                self.process.state = SUCCESS
                self.cond.notify()
                self.cond.release()
            else:
                #print "'%s','%s'" %(self.lock_acquired, )
                raise Exception("Fatal error!, Remote lock has not been acquired!")

        elif header.cmd == LOCKTHREAD_POISON:
            #print("%s POISON\n" % (self.process.id))
            if self.lock_acquired == header._source_id:
                self.cond.acquire()
                if self.process.state == READY:
                    self.process.state = POISON
                    self.cond.notify()
                self.cond.release()
            else:
                raise Exception("Fatal error!, Remote lock has not been acquired!")

        elif header.cmd == LOCKTHREAD_RETIRE:
            #print("%s RETIRE\n" % (self.process.id))
            if self.lock_acquired == header._source_id:
                self.cond.acquire()
                if self.process.state == READY:
                    self.process.state = RETIRE
                    self.cond.notify()
                self.cond.release()
            else:
                raise Exception("Fatal error!, Remote lock has not been acquired!")

        elif header.cmd == LOCKTHREAD_RELEASE_LOCK:
            #print("%s RELEASE\n" % (self.process.id))
            if self.lock_acquired == header._source_id:
                self.lock_acquired = None
                # Hand the lock to the oldest queued acquire request, if any.
                if self.waiting:
                    self.handle(self.waiting.pop(0))
            else:
                raise Exception("Fatal error!, Remote lock has not been acquired!")
class Buffer(object):
    """Bounded message buffer for a buffered channel home.

    Poison/retire pills are parked on the buffer (ispoisoned/isretired) until
    the buffered items have drained; see ChannelHome.check_termination.
    """

    def __init__(self, LM, max):
        self.max = max
        self.items = []
        self.ispoisoned = False
        self.isretired = False
        self.LM = LM

    def isfull(self):
        return len(self.items) == self.max

    def isempty(self):
        return len(self.items) == 0

    def insertfrom(self, writer):
        """Try to move the writer's message into the buffer.

        Returns (remove_write, success): remove_write schedules the request's
        removal from the write queue (set both on success and on any
        not-READY/unreachable writer).
        """
        success = False
        remove_write = False

        # Check for available buffer space
        if len(self.items) < self.max:
            try:
                w_conn, w_state, w_seq = self.LM.remote_acquire_and_get_state(writer.process)
                # A stale sequence number means the request was superseded.
                if w_seq != writer.seq_check:
                    w_state = FAIL
                if (w_state == READY):
                    self.items.append(writer.msg)
                    self.LM.remote_notify(w_conn, writer.process, writer.ch_id)
                    success = True
                    w_state = SUCCESS

                # Schedule removal of NOT READY requests from channel
                if (w_state != READY):
                    remove_write = True

                self.LM.remote_release(w_conn, writer.process)
            except AddrUnavailableException:
                remove_write = True

        return (remove_write, success)

    def putinto(self, reader):
        """Try to deliver the oldest buffered message to the reader.

        Returns (remove_read, success) with the same semantics as insertfrom.
        """
        success = False
        remove_read = False

        # Check for available buffer items
        if self.items:
            try:
                r_conn, r_state, r_seq = self.LM.remote_acquire_and_get_state(reader.process)
                if r_seq != reader.seq_check:
                    r_state = FAIL
                if (r_state == READY):
                    msg = self.items.pop(0)
                    self.LM.remote_notify(r_conn, reader.process, reader.ch_id, msg)
                    success = True
                    r_state = SUCCESS

                # Schedule removal of NOT READY requests from channel
                if (r_state != READY):
                    remove_read = True

                self.LM.remote_release(r_conn, reader.process)
            except AddrUnavailableException:
                remove_read = True

        return (remove_read, success)
class ChannelHome(object):
def __init__(self, name, buffer):
    """Create the home-side state for channel `name`.

    buffer > 0 enables a bounded Buffer of that size; 0 means a fully
    synchronous (unbuffered) channel.
    """
    self.readqueue=[]
    self.writequeue=[]
    self.ispoisoned=False
    self.isretired=False
    self.readers=0
    self.writers=0
    self.channelreferences = 0
    self.name = name
    self.LM = LockMessenger(name)
    if buffer > 0:
        self.buffer = Buffer(self.LM, buffer)
    else:
        self.buffer = None
def check_termination(self):
    """
    This method is invoked on the initial posting of a request.
    It checks the buffer for any poison / retire pill, which has
    been postponed because of buffered messages. If buffer is now
    empty, then the channel is poisoned / retired.
    """
    if self.buffer:
        # Buffer enabled
        if self.buffer.ispoisoned:
            if self.buffer.isempty():
                # Drained: promote the parked pill to full channel poison.
                self.poison_writer()

        if self.buffer.isretired:
            if self.buffer.isempty():
                self.isretired= True
                for p in self.readqueue:
                    p.retire()
                self.readqueue = []

    if self.ispoisoned:
        raise ChannelPoisonException()

    if self.isretired:
        raise ChannelRetireException()
def post_read(self, req):
    """Queue a read request and try to match it, unless the channel terminated."""
    self.check_termination()

    terminated = self.isretired or self.ispoisoned
    if terminated:
        # Raise the appropriate poison/retire exception for the requester.
        self.check_termination()
    else:
        self.readqueue.append(req)
        self.match()
def post_write(self, req):
    """Queue a write request and try to match it, unless the channel terminated."""
    self.check_termination()

    terminated = self.isretired or self.ispoisoned
    if terminated:
        # Raise the appropriate poison/retire exception for the requester.
        self.check_termination()
    else:
        self.writequeue.append(req)
        self.match()
def leave(self, process_id):
    """Drop every queued read/write request belonging to process_id."""
    self.readqueue = [req for req in self.readqueue if req.process.id != process_id]
    self.writequeue = [req for req in self.writequeue if req.process.id != process_id]
def match(self):
if self.buffer:
# Buffering is enabled.
if self.buffer.isfull():
# Extract item
for r in self.readqueue[:]:
remove_read, success = self.buffer.putinto(r)
if remove_read:
self.readqueue.remove(r)
if success:
break
# Insert item
for w in self.writequeue[:]:
remove_write, success = self.buffer.insertfrom(w)
if remove_write:
self.writequeue.remove(w)
if success:
break
else:
# Insert item
for w in self.writequeue[:]:
remove_write, success = self.buffer.insertfrom(w)
if remove_write:
self.writequeue.remove(w)
if success:
break
# Extract item
for r in self.readqueue[:]:
remove_read, success = self.buffer.putinto(r)
if remove_read:
self.readqueue.remove(r)
if success:
break
else:
# Standard matching if no buffer
for w in self.writequeue[:]:
for r in self.readqueue[:]:
remove_write, remove_read, success = w.offer(r)
if remove_read:
self.readqueue.remove(r)
if remove_write:
self.writequeue.remove(w)
if success:
return # break match loop on first success
break
if success:
return # break match loop on first success
# The method for poisoning non-buffered channels is identical
# for both the reading and writing end, while the method differs
# for buffered channels.
# In buffered channels the poison pill must be propagated through
# the buffer slots before the other end is poisoned.
# An exception has been made for the reading end:
# To allow poisoning to skip
# buffer slots and instantly poison the writing end.
def poison_reader(self):
self.ispoisoned=True
for p in self.readqueue:
p.poison()
for p in self.writequeue:
p.poison()
# flush all requests
self.readqueue = []
self.writequeue = []
def poison_writer(self):
if self.buffer and not self.buffer.isempty():
# Buffer is enabled and has content
self.buffer.ispoisoned = True
for p in self.writequeue:
p.poison()
# flush all write requests
self.writequeue = []
else:
self.ispoisoned=True
for p in self.readqueue:
p.poison()
for p in self.writequeue:
p.poison()
# flush all requests
self.readqueue = []
self.writequeue = []
def retire_reader(self):
self.readers-=1
#print("%s READERS LEFT %d (retired:%s)" % (self.name, self.readers, str(self.isretired)))
if not self.isretired:
if self.readers==0:
self.isretired= True
#print "WRITEQUEUE",self.writequeue
for p in self.writequeue:
p.retire()
#self.writequeue = []
def retire_writer(self):
self.writers-=1
#print("%s WRITERS LEFT %d (retired:%s)" % (self.name, self.writers, str(self.isretired)))
if not self.isretired:
if self.writers==0:
if self.buffer and not self.buffer.isempty():
# Buffer is enabled and has content
self.buffer.isretired = True
else:
self.isretired= True
#print "READQUEUE",self.readqueue
for p in self.readqueue:
p.retire()
#self.readqueue = []
def join_reader(self):
self.readers+=1
def join_writer(self):
self.writers+=1
def register(self):
self.channelreferences += 1
def deregister(self):
self.channelreferences -= 1
if self.channelreferences == 0:
# Shutdown
return True
return False
class AddrID(object):
    """Lightweight identification of a remote channel end.

    Combines the (host, port) address used to reach the process with the
    opaque id of the process/request at that address.
    """
    def __init__(self, addr=(b"",0), id=b""):
        # (hostname/IP, port) tuple used to connect back to the process.
        self.hostNport = addr
        # Opaque identifier of the process / request at that address.
        self.id = id
        # Cleared when the remote end is known to be gone.
        self.active = True

    def __repr__(self):
        # Debug aid only; not part of the wire protocol.
        return "AddrID(addr=%r, id=%r)" % (self.hostNport, self.id)
class ChannelReq(object):
    """
    A pending read or write request posted to a ChannelHome.

    All remote operations (cancel/poison/retire/notify) go through the
    LockMessenger and are guarded by a sequence number: the remote process
    is only acted upon if its current sequence number still equals the one
    recorded when this request was created (seq_check), i.e. the process
    has not already moved on to another operation.
    """
    def __init__(self, LM, process_src, process_seq, ch_id, msg = None):
        self.process = process_src
        self.ch_id = ch_id
        # msg is None for read requests; the payload for write requests.
        self.msg = msg

        # check_sequence contains a number which must be equivalent with the sequence
        # number returned by remote_acquire_and_get_state.
        self.seq_check = process_seq

        self.LM = LM

    def cancel(self):
        # Best-effort cancel notification; the process may already be gone.
        try:
            conn, state, seq = self.LM.remote_acquire_and_get_state(self.process)
            if seq == self.seq_check:
                self.LM.remote_cancel(conn, self.process)
            self.LM.remote_release(conn, self.process)
        except AddrUnavailableException:
            # Unable to reach process to notify cancel
            if conf.get(SOCKETS_STRICT_MODE):
                raise FatalException("PyCSP (cancel notification) unable to reach process (%s)" % str(self.process))
            else:
                sys.stderr.write("PyCSP (cancel notification) unable to reach process (%s)\n" % str(self.process))

    def poison(self):
        try:
            conn, state, seq = self.LM.remote_acquire_and_get_state(self.process)
            if seq == self.seq_check:
                self.LM.remote_poison(conn, self.process)
            # Ignore if sequence is incorrect
            self.LM.remote_release(conn, self.process)
        except AddrUnavailableException:
            # Unable to reach process to notify poison
            if conf.get(SOCKETS_STRICT_MODE):
                raise FatalException("PyCSP (poison notification) unable to reach process (%s)" % str(self.process))
            else:
                sys.stderr.write("PyCSP (poison notification) unable to reach process (%s)\n" % str(self.process))

    def retire(self):
        try:
            conn, state, seq = self.LM.remote_acquire_and_get_state(self.process)
            if seq == self.seq_check:
                self.LM.remote_retire(conn, self.process)
            # Ignore if sequence is incorrect
            self.LM.remote_release(conn, self.process)
        except AddrUnavailableException:
            # Unable to reach process to notify retire
            if conf.get(SOCKETS_STRICT_MODE):
                raise FatalException("PyCSP (retire notification) unable to reach process (%s)" % str(self.process))
            else:
                sys.stderr.write("PyCSP (retire notification) unable to reach process (%s)\n" % str(self.process))

    def offer(self, reader):
        """
        Attempt to match this write request with *reader*.

        Returns (remove_write, remove_read, success) telling the caller
        which requests must be dropped from the channel queues.
        """
        success = False
        remove_write = False
        remove_read = False

        try:
            # Acquire double lock in a globally consistent order (lowest
            # process id first) to avoid deadlocking with a concurrent offer.
            if (self.process.id < reader.process.id):
                w_conn, w_state, w_seq = self.LM.remote_acquire_and_get_state(self.process)
                r_conn, r_state, r_seq = self.LM.remote_acquire_and_get_state(reader.process)
            else:
                r_conn, r_state, r_seq = self.LM.remote_acquire_and_get_state(reader.process)
                w_conn, w_state, w_seq = self.LM.remote_acquire_and_get_state(self.process)

            # Check sequence numbers; a mismatch means the process has moved
            # on and this request is stale.
            if r_seq != reader.seq_check:
                r_state = FAIL
            if w_seq != self.seq_check:
                w_state = FAIL

            # Success?
            if (r_state == READY and w_state == READY):
                self.LM.remote_notify(r_conn, reader.process, reader.ch_id, self.msg)
                self.LM.remote_notify(w_conn, self.process, self.ch_id)

                success = True

                r_state = SUCCESS
                w_state = SUCCESS

            # Schedule removal of NOT READY requests from channel
            if (r_state != READY):
                remove_read = True
            if (w_state != READY):
                remove_write = True

            # Release double lock
            if (self.process.id < reader.process.id):
                self.LM.remote_release(r_conn, reader.process)
                self.LM.remote_release(w_conn, self.process)
            else:
                self.LM.remote_release(w_conn, self.process)
                self.LM.remote_release(r_conn, reader.process)

        except AddrUnavailableException as e:
            # Unable to reach process during offer
            # The primary reason is probably because a request were part of an alting and the process have exited.
            if conf.get(SOCKETS_STRICT_MODE):
                raise FatalException("PyCSP unable to reach process during offer(%s)" % str(self.process))
            else:
                sys.stderr.write("PyCSP unable to reach process during offer(%s)\n" % str(self.process))

            success = False
            # Drop whichever side became unreachable.
            if e.addr == self.process.hostNport:
                remove_write = True
            if e.addr == reader.process.hostNport:
                remove_read = True

        return (remove_write, remove_read, success)
class ChannelHomeThread(threading.Thread):
    """
    Thread hosting a ChannelHome: it consumes channel-protocol messages
    from the socket dispatcher's queue and applies them to the channel
    state, forwarding poison/retire back to posting processes as needed.
    """
    def __init__(self, name, buffer, addr = None):
        threading.Thread.__init__(self)

        # This may cause the thread to terminate unexpectedly and thus
        # leave processes in an inconsistent state.
        # To enforce a nice shutdown, the Shutdown function must be called
        # by the user
        self.daemon = False

        self.id = name

        self.dispatch = SocketDispatcher().getThread()
        self.addr = self.dispatch.server_addr

        # Returns synchronized Queue object where messages are retrieved from.
        self.input = self.dispatch.registerChannel(self.id)

        self.channel = ChannelHome(name, buffer)

    def run(self):
        # Message dispatch loop: runs until the channel is deregistered
        # by its last reference holder.
        LM = self.channel.LM

        while(True):
            msg = self.input.pop_normal()
            header = msg.header

            if header.cmd == CHANTHREAD_JOIN_READER:
                self.channel.join_reader()
            elif header.cmd == CHANTHREAD_JOIN_WRITER:
                self.channel.join_writer()
            elif header.cmd == CHANTHREAD_RETIRE_READER:
                self.channel.retire_reader()
            elif header.cmd == CHANTHREAD_RETIRE_WRITER:
                self.channel.retire_writer()
            elif header.cmd == CHANTHREAD_REGISTER:
                self.channel.register()
            elif header.cmd == CHANTHREAD_DEREGISTER:
                is_final = self.channel.deregister()
                if is_final:
                    # Last channel reference dropped: shut this thread down.
                    # TODO: Ensure that the channel is unused
                    # TODO: Check if any unread messages is left in channel?
                    self.dispatch.deregisterChannel(self.id)
                    return
            elif header.cmd == CHANTHREAD_POISON_READER:
                self.channel.poison_reader()
            elif header.cmd == CHANTHREAD_POISON_WRITER:
                self.channel.poison_writer()
            elif header.cmd == CHANTHREAD_POST_WRITE or header.cmd == CHANTHREAD_POST_ACK_WRITE:
                process = AddrID((header._source_host, header._source_port), header._source_id)
                msg = msg.payload
                try:
                    self.channel.post_write(ChannelReq(LM, process, header.seq_number, self.channel.name, msg))
                except ChannelPoisonException:
                    # Channel already poisoned: forward the poison to the
                    # posting process instead, if it is still waiting.
                    try:
                        lock_s, state, seq = LM.remote_acquire_and_get_state(process)
                        if seq == header.seq_number:
                            if state == READY:
                                LM.remote_poison(lock_s, process)
                        # Ignore if wrong sequence number
                        LM.remote_release(lock_s, process)
                    except AddrUnavailableException:
                        # Unable to reach process to notify poison
                        if conf.get(SOCKETS_STRICT_MODE):
                            raise FatalException("PyCSP (poison notification:2) unable to reach process (%s)" % str(process))
                        else:
                            sys.stderr.write("PyCSP (poison notification:2) unable to reach process (%s)\n" % str(process))
                except ChannelRetireException:
                    # Channel already retired: forward retirement likewise.
                    try:
                        lock_s, state, seq = LM.remote_acquire_and_get_state(process)
                        if seq == header.seq_number:
                            if state == READY:
                                LM.remote_retire(lock_s, process)
                        # Ignore if wrong sequence number
                        LM.remote_release(lock_s, process)
                    except AddrUnavailableException:
                        # Unable to reach process to notify retire
                        if conf.get(SOCKETS_STRICT_MODE):
                            raise FatalException("PyCSP (retire notification:2) unable to reach process (%s)" % str(process))
                        else:
                            sys.stderr.write("PyCSP (retire notification:2) unable to reach process (%s)\n" % str(process))

                # Send acknowledgement to process. (used to ensure prioritized select)
                if header.cmd == CHANTHREAD_POST_ACK_WRITE:
                    LM.ack(process)
            elif header.cmd == CHANTHREAD_POST_READ or header.cmd == CHANTHREAD_POST_ACK_READ:
                process = AddrID((header._source_host, header._source_port), header._source_id)
                try:
                    self.channel.post_read(ChannelReq(LM, process, header.seq_number, self.channel.name))
                except ChannelPoisonException:
                    # Channel already poisoned: forward poison to the reader.
                    try:
                        lock_s, state, seq = LM.remote_acquire_and_get_state(process)
                        if seq == header.seq_number:
                            if state == READY:
                                LM.remote_poison(lock_s, process)
                        # Ignore if wrong sequence number
                        LM.remote_release(lock_s, process)
                    except AddrUnavailableException:
                        # Unable to reach process to notify poison
                        if conf.get(SOCKETS_STRICT_MODE):
                            raise FatalException("PyCSP (poison notification:3) unable to reach process (%s)" % str(process))
                        else:
                            sys.stderr.write("PyCSP (poison notification:3) unable to reach process (%s)\n" % str(process))
                except ChannelRetireException:
                    # Channel already retired: forward retirement to the reader.
                    try:
                        lock_s, state, seq = LM.remote_acquire_and_get_state(process)
                        if seq == header.seq_number:
                            if state == READY:
                                LM.remote_retire(lock_s, process)
                        # Ignore if wrong sequence number
                        LM.remote_release(lock_s, process)
                    except AddrUnavailableException:
                        # Unable to reach process to notify retire
                        if conf.get(SOCKETS_STRICT_MODE):
                            raise FatalException("PyCSP (retire notification:3) unable to reach process (%s)" % str(process))
                        else:
                            sys.stderr.write("PyCSP (retire notification:3) unable to reach process (%s)\n" % str(process))

                # Send acknowledgement to process. (used to ensure prioritized select)
                if header.cmd == CHANTHREAD_POST_ACK_READ:
                    LM.ack(process)
            elif header.cmd == CHANTHREAD_ENTER:
                # NAT fix: remember the already-open socket for replies.
                socket = msg.natfix
                addr = (header._source_host, header._source_port)
                if socket:
                    LM.set_reverse_socket(addr, socket)
                # Possible code to register process at channel
            elif header.cmd == CHANTHREAD_LEAVE:
                paddr = AddrID((header._source_host, header._source_port), header._source_id)
                # Final communication to process. Poison or retire can never come after leave.
                self.channel.leave(paddr.id)
                LM.remote_final(paddr)
| |
from array import array
from whoosh.compat import xrange
from whoosh.system import emptybytes
from whoosh.system import pack_byte, unpack_byte
from whoosh.system import pack_ushort_le, unpack_ushort_le
from whoosh.system import pack_uint_le, unpack_uint_le
def delta_encode(nums):
    """Yield each value of *nums* as the difference from its predecessor
    (the first value is emitted relative to zero)."""
    previous = 0
    for current in nums:
        yield current - previous
        previous = current
def delta_decode(nums):
    """Inverse of delta_encode(): yield the running totals of the deltas."""
    total = 0
    for delta in nums:
        total = total + delta
        yield total
class GrowableArray(object):
    """An ``array.array`` wrapper that transparently widens its typecode
    when an appended value does not fit, finally falling back to a plain
    list when even the widest array type cannot hold the values."""

    # Widening steps tried in order: unsigned short, signed int, unsigned int.
    _WIDEN_STEPS = ((2 ** 16, "H"), (2 ** 31, "i"), (2 ** 32, "I"))

    def __init__(self, inittype="B", allow_longs=True):
        self.array = array(inittype)
        self._allow_longs = allow_longs

    def __repr__(self):
        return f"{self.__class__.__name__}({self.array!r})"

    def __len__(self):
        return len(self.array)

    def __iter__(self):
        return iter(self.array)

    def _retype(self, maxnum):
        """Switch to the narrowest typecode able to hold *maxnum*; fall
        back to a plain list when array conversion fails."""
        newtype = None
        for limit, code in self._WIDEN_STEPS:
            if maxnum < limit:
                newtype = code
                break
        if newtype is None:
            if not self._allow_longs:
                raise OverflowError("%r is too big to fit in an array" % maxnum)
            newtype = "q"

        try:
            self.array = array(newtype, iter(self.array))
        except ValueError:
            # Existing contents cannot be represented: degrade to a list.
            self.array = list(self.array)

    def append(self, n):
        """Append *n*, widening the underlying storage on overflow."""
        try:
            self.array.append(n)
        except OverflowError:
            self._retype(n)
            self.array.append(n)

    def extend(self, ns):
        for value in ns:
            self.append(value)

    @property
    def typecode(self):
        # Once degraded to a list, report the widest array typecode.
        return self.array.typecode if isinstance(self.array, array) else "q"

    def to_file(self, dbfile):
        """Write the contents to *dbfile* (as an array, or long-by-long
        once degraded to a list)."""
        if isinstance(self.array, array):
            dbfile.write_array(self.array)
        else:
            for n in self.array:
                dbfile.write_long(n)
# Number list encoding base class
class NumberEncoding(object):
    """Base class for number-list codecs.

    Subclasses implement write_nums()/read_nums(); the delta helpers and
    the positional get() are derived from those.
    """
    maxint = None

    def write_nums(self, f, numbers):
        raise NotImplementedError

    def read_nums(self, f, n):
        raise NotImplementedError

    def write_deltas(self, f, numbers):
        # Store the gaps between consecutive values rather than the values.
        return self.write_nums(f, list(delta_encode(numbers)))

    def read_deltas(self, f, n):
        # Inverse of write_deltas(): reconstruct absolute values lazily.
        return delta_decode(self.read_nums(f, n))

    def get(self, f, pos, i):
        """Return the i'th (0-based) number stored at *pos* by scanning
        forward; subclasses with random access override this."""
        f.seek(pos)
        last = None
        for last in self.read_nums(f, i + 1):
            pass
        return last
# Fixed width encodings
class FixedEncoding(NumberEncoding):
    """Codec where every number occupies exactly ``size`` bytes, enabling
    true random access in get()."""
    _encode = None
    _decode = None
    size = None

    def write_nums(self, f, numbers):
        # Hoist the bound encoder out of the loop.
        encode = self._encode
        for num in numbers:
            f.write(encode(num))

    def read_nums(self, f, n):
        decode = self._decode
        width = self.size
        for _ in xrange(n):
            yield decode(f.read(width))

    def get(self, f, pos, i):
        # Fixed width lets us seek directly to the i'th slot.
        width = self.size
        f.seek(pos + i * width)
        return self._decode(f.read(width))
class ByteEncoding(FixedEncoding):
    # Unsigned 8-bit fixed-width encoding (values 0..255).
    size = 1
    maxint = 255
    _encode = pack_byte
    _decode = unpack_byte
class UShortEncoding(FixedEncoding):
    # Unsigned 16-bit little-endian fixed-width encoding.
    size = 2
    maxint = 2 ** 16 - 1
    _encode = pack_ushort_le
    _decode = unpack_ushort_le
class UIntEncoding(FixedEncoding):
    # Unsigned 32-bit little-endian fixed-width encoding.
    size = 4
    maxint = 2 ** 32 - 1
    _encode = pack_uint_le
    _decode = unpack_uint_le
# High-bit encoded variable-length integer
class Varints(NumberEncoding):
    """Codec delegating to the file object's own varint support
    (``write_varint``/``read_varint``)."""
    maxint = None

    def write_nums(self, f, numbers):
        write = f.write_varint
        for num in numbers:
            write(num)

    def read_nums(self, f, n):
        read = f.read_varint
        for _ in xrange(n):
            yield read()
# Simple16 algorithm for storing arrays of positive integers (usually delta
# encoded lists of sorted integers)
#
# 1. http://www2008.org/papers/pdf/p387-zhangA.pdf
# 2. http://www2009.org/proceedings/pdf/p401.pdf
class Simple16(NumberEncoding):
    # The maximum possible integer value Simple16 can encode is < 2^28.
    # Therefore, in order to use Simple16, the application must have its own
    # code to encode numbers in the range of [2^28, 2^32). A simple way is just
    # write those numbers as 32-bit integers (that is, no compression for very
    # big numbers).
    _numsize = 16
    _bitsize = 28
    maxint = 2 ** _bitsize - 1

    # Number of stored numbers per code
    _num = [28, 21, 21, 21, 14, 9, 8, 7, 6, 6, 5, 5, 4, 3, 2, 1]
    # Number of bits for each number per code
    _bits = [
        (1,) * 28,
        (2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
        (1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1),
        (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2),
        (2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
        (4, 3, 3, 3, 3, 3, 3, 3, 3),
        (3, 4, 4, 4, 4, 3, 3, 3),
        (4, 4, 4, 4, 4, 4, 4),
        (5, 5, 5, 5, 4, 4),
        (4, 4, 5, 5, 5, 5),
        (6, 6, 6, 5, 5),
        (5, 5, 6, 6, 6),
        (7, 7, 7, 7),
        (10, 9, 9),
        (14, 14),
        (28,),
    ]

    def write_nums(self, f, numbers):
        """Pack *numbers* into consecutive 32-bit little-endian codewords."""
        _compress = self._compress
        i = 0
        while i < len(numbers):
            value, taken = _compress(numbers, i, len(numbers) - i)
            f.write_uint_le(value)
            i += taken

    def _compress(self, inarray, inoffset, n):
        """Greedily pick the densest key whose per-slot bit widths fit the
        next numbers; return (codeword, numbers_consumed)."""
        _numsize = self._numsize
        _bitsize = self._bitsize
        _num = self._num
        _bits = self._bits

        for key in xrange(_numsize):
            value = key << _bitsize
            num = _num[key] if _num[key] < n else n
            bits = 0

            j = 0
            while j < num and inarray[inoffset + j] < (1 << _bits[key][j]):
                x = inarray[inoffset + j]
                value |= x << bits
                bits += _bits[key][j]
                j += 1

            if j == num:
                return value, num

        # Unreachable for values <= maxint: key 15 stores one 28-bit number.
        raise Exception

    def read_nums(self, f, n):
        """Yield *n* numbers by decoding consecutive 32-bit codewords."""
        _decompress = self._decompress

        i = 0
        while i < n:
            value = unpack_uint_le(f.read(4))[0]
            for v in _decompress(value, n - i):
                yield v
                i += 1

    def _decompress(self, value, n):
        """Yield up to *n* numbers packed in the codeword *value*."""
        _numsize = self._numsize
        _bitsize = self._bitsize
        _num = self._num
        _bits = self._bits

        key = value >> _bitsize
        num = _num[key] if _num[key] < n else n
        bits = 0
        for j in xrange(num):
            v = value >> bits
            yield v & (0xffffffff >> (32 - _bits[key][j]))
            bits += _bits[key][j]

    def get(self, f, pos, i):
        """Return the i'th (0-based) number of the sequence starting at
        *pos*, skipping whole codewords until the one containing it."""
        f.seek(pos)
        base = 0
        # unpack_uint_le returns a 1-tuple (cf. read_nums), so take [0];
        # the original code shifted the tuple itself, raising TypeError.
        value = unpack_uint_le(f.read(4))[0]
        key = value >> self._bitsize
        num = self._num[key]

        # The current word holds indices [base, base + num); advance while
        # i lies beyond it. (The original `i > base + num` stopped one word
        # early when i == base + num, indexing _bits[key] out of range.)
        while i >= base + num:
            base += num
            value = unpack_uint_le(f.read(4))[0]
            key = value >> self._bitsize
            num = self._num[key]

        offset = i - base
        if offset:
            value = value >> sum(self._bits[key][:offset])
        return value & (2 ** self._bits[key][offset] - 1)
# Google Packed Ints algorithm: a set of four numbers is preceded by a "key"
# byte, which encodes how many bytes each of the next four integers use
# (stored in the byte as four 2-bit numbers)
class GInts(NumberEncoding):
    maxint = 2 ** 32 - 1

    # Number of future bytes to expect after a "key" byte value of N -- used to
    # skip ahead from a key byte
    _lens = array("B", [4, 5, 6, 7, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 5, 6,
        7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9,
        10, 11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11,
        12, 13, 5, 6, 7, 8, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 6, 7, 8, 9, 7,
        8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10,
        11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11,
        12, 13, 14, 6, 7, 8, 9, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 7, 8, 9,
        10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 8, 9, 10, 11, 9, 10, 11,
        12, 10, 11, 12, 13, 11, 12, 13, 14, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12,
        13, 14, 12, 13, 14, 15, 7, 8, 9, 10, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11,
        12, 13, 8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 9, 10,
        11, 12, 10, 11, 12, 13, 11, 12, 13, 14, 12, 13, 14, 15, 10, 11, 12, 13, 11,
        12, 13, 14, 12, 13, 14, 15, 13, 14, 15, 16])

    def key_to_sizes(self, key):
        """Returns a list of the sizes of the next four numbers given a key
        byte.
        """
        return [(key >> (i * 2) & 3) + 1 for i in xrange(4)]

    def write_nums(self, f, numbers):
        """Write *numbers* in groups of four, each group preceded by a key
        byte describing the byte width of each value."""
        buf = emptybytes
        count = 0
        key = 0
        for v in numbers:
            shift = count * 2
            if v < 256:
                buf += pack_byte(v)
            elif v < 65536:
                key |= 1 << shift
                buf += pack_ushort_le(v)
            elif v < 16777216:
                key |= 2 << shift
                # Only the low three bytes of the little-endian packing.
                buf += pack_uint_le(v)[:3]
            else:
                key |= 3 << shift
                buf += pack_uint_le(v)

            count += 1
            if count == 4:
                f.write_byte(key)
                f.write(buf)
                count = 0
                key = 0
                buf = emptybytes  # Clear the buffer

        # Write out leftovers in the buffer
        if count:
            f.write_byte(key)
            f.write(buf)

    def read_nums(self, f, n):
        """Read N integers from the bytes stream dbfile. Expects that the file
        is positioned at a key byte.
        """
        count = 0
        key = None
        for _ in xrange(n):
            if count == 0:
                key = f.read_byte()
            code = key >> (count * 2) & 3
            if code == 0:
                yield f.read_byte()
            elif code == 1:
                yield f.read_ushort_le()
            elif code == 2:
                # 3-byte value: pad with a zero byte so it unpacks as a
                # 4-byte little-endian uint. Must be a bytes literal --
                # the original `+ "\x00"` raised TypeError on Python 3.
                yield unpack_uint_le(f.read(3) + b"\x00")[0]
            else:
                yield f.read_uint_le()

            count = (count + 1) % 4

    # def get(self, f, pos, i):
    #    f.seek(pos)
    #    base = 0
    #    key = f.read_byte()
    #    while i > base + 4:
    #        base += 4
    #        f.seek(self._lens[key], 1)
    #        key = f.read_byte()
    #
    #    for n in self.read_nums(f, (i + 1) - base):
    #        pass
    #    return n
| |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import hashlib
from typing import Dict, List, TYPE_CHECKING, Tuple, Set
from collections import defaultdict
import logging
from aiorpcx import run_in_thread, RPCError
from . import util
from .transaction import Transaction, PartialTransaction
from .util import bh2u, make_aiohttp_session, NetworkJobOnDefaultServer, random_shuffled_copy, OldTaskGroup
from .bitcoin import address_to_scripthash, is_address
from .logging import Logger
from .interface import GracefulDisconnect, NetworkTimeout
if TYPE_CHECKING:
from .network import Network
from .address_synchronizer import AddressSynchronizer
class SynchronizerFailure(Exception):
    """Raised on unrecoverable synchronizer errors (e.g. a received tx not
    matching its expected txid, or a history that stays stale past the
    network timeout)."""
    pass
def history_status(h):
    """Return the electrum-protocol status hash for address history *h*
    (an iterable of (tx_hash, height) pairs), or None for empty history."""
    if not h:
        return None
    parts = []
    for tx_hash, height in h:
        parts.append(tx_hash + ':%d:' % height)
    return bh2u(hashlib.sha256(''.join(parts).encode('ascii')).digest())
class SynchronizerBase(NetworkJobOnDefaultServer):
    """Subscribe over the network to a set of addresses, and monitor their statuses.

    Every time a status changes, run a coroutine provided by the subclass.
    """
    def __init__(self, network: 'Network'):
        self.asyncio_loop = network.asyncio_loop
        self._reset_request_counters()

        NetworkJobOnDefaultServer.__init__(self, network)

    def _reset(self):
        # Called when (re)connecting to a server; drop all per-server state.
        super()._reset()
        self.requested_addrs = set()
        self.scripthash_to_address = {}
        self._processed_some_notifications = False  # so that we don't miss them
        self._reset_request_counters()
        # Queues
        self.add_queue = asyncio.Queue()
        self.status_queue = asyncio.Queue()

    async def _run_tasks(self, *, taskgroup):
        await super()._run_tasks(taskgroup=taskgroup)
        try:
            async with taskgroup as group:
                await group.spawn(self.send_subscriptions())
                await group.spawn(self.handle_status())
                await group.spawn(self.main())
        finally:
            # we are being cancelled now
            self.session.unsubscribe(self.status_queue)

    def _reset_request_counters(self):
        self._requests_sent = 0
        self._requests_answered = 0

    def add(self, addr):
        # Thread-safe entry point for watching a new address.
        asyncio.run_coroutine_threadsafe(self._add_address(addr), self.asyncio_loop)

    async def _add_address(self, addr: str):
        # note: this method is async as add_queue.put_nowait is not thread-safe.
        if not is_address(addr): raise ValueError(f"invalid bitcoin address {addr}")
        if addr in self.requested_addrs: return
        self.requested_addrs.add(addr)
        self.add_queue.put_nowait(addr)

    async def _on_address_status(self, addr, status):
        """Handle the change of the status of an address."""
        raise NotImplementedError()  # implemented by subclasses

    async def send_subscriptions(self):
        # Consume add_queue and issue one scripthash subscription per address.
        async def subscribe_to_address(addr):
            h = address_to_scripthash(addr)
            self.scripthash_to_address[h] = addr
            self._requests_sent += 1
            try:
                async with self._network_request_semaphore:
                    await self.session.subscribe('blockchain.scripthash.subscribe', [h], self.status_queue)
            except RPCError as e:
                if e.message == 'history too large':  # no unique error code
                    raise GracefulDisconnect(e, log_level=logging.ERROR) from e
                raise
            self._requests_answered += 1
            self.requested_addrs.remove(addr)

        while True:
            addr = await self.add_queue.get()
            await self.taskgroup.spawn(subscribe_to_address, addr)

    async def handle_status(self):
        # Dispatch server status notifications to the subclass handler.
        while True:
            h, status = await self.status_queue.get()
            addr = self.scripthash_to_address[h]
            await self.taskgroup.spawn(self._on_address_status, addr, status)
            self._processed_some_notifications = True

    def num_requests_sent_and_answered(self) -> Tuple[int, int]:
        return self._requests_sent, self._requests_answered

    async def main(self):
        raise NotImplementedError()  # implemented by subclasses
class Synchronizer(SynchronizerBase):
    '''The synchronizer keeps the wallet up-to-date with its set of
    addresses and their transactions. It subscribes over the network
    to wallet addresses, gets the wallet to generate new addresses
    when necessary, requests the transaction history of any addresses
    we don't have the full history of, and requests binary transaction
    data of any transactions the wallet doesn't have.
    '''
    def __init__(self, wallet: 'AddressSynchronizer'):
        self.wallet = wallet
        SynchronizerBase.__init__(self, wallet.network)

    def _reset(self):
        super()._reset()
        # tx_hash -> tx_height for transactions currently being fetched.
        self.requested_tx = {}
        # (addr, status) pairs for histories currently being fetched.
        self.requested_histories = set()
        # addr -> watchdog task that disconnects if the history stays stale.
        self._stale_histories = dict()  # type: Dict[str, asyncio.Task]

    def diagnostic_name(self):
        return self.wallet.diagnostic_name()

    def is_up_to_date(self):
        # Up to date only when nothing at all is pending.
        return (not self.requested_addrs
                and not self.requested_histories
                and not self.requested_tx
                and not self._stale_histories)

    async def _on_address_status(self, addr, status):
        history = self.wallet.db.get_addr_history(addr)
        if history_status(history) == status:
            return
        # No point in requesting history twice for the same announced status.
        # However if we got announced a new status, we should request history again:
        if (addr, status) in self.requested_histories:
            return
        # request address history
        self.requested_histories.add((addr, status))
        self._stale_histories.pop(addr, asyncio.Future()).cancel()
        h = address_to_scripthash(addr)
        self._requests_sent += 1
        async with self._network_request_semaphore:
            result = await self.interface.get_history_for_scripthash(h)
        self._requests_answered += 1
        self.logger.info(f"receiving history {addr} {len(result)}")
        hist = list(map(lambda item: (item['tx_hash'], item['height']), result))
        # tx_fees
        tx_fees = [(item['tx_hash'], item.get('fee')) for item in result]
        tx_fees = dict(filter(lambda x:x[1] is not None, tx_fees))
        # Check that the status corresponds to what was announced
        if history_status(hist) != status:
            # could happen naturally if history changed between getting status and history (race)
            self.logger.info(f"error: status mismatch: {addr}. we'll wait a bit for status update.")
            # The server is supposed to send a new status notification, which will trigger a new
            # get_history. We shall wait a bit for this to happen, otherwise we disconnect.
            async def disconnect_if_still_stale():
                timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Generic)
                await asyncio.sleep(timeout)
                raise SynchronizerFailure(f"timeout reached waiting for addr {addr}: history still stale")
            self._stale_histories[addr] = await self.taskgroup.spawn(disconnect_if_still_stale)
        else:
            self._stale_histories.pop(addr, asyncio.Future()).cancel()
            # Store received history
            self.wallet.receive_history_callback(addr, hist, tx_fees)
            # Request transactions we don't have
            await self._request_missing_txs(hist)

        # Remove request; this allows up_to_date to be True
        self.requested_histories.discard((addr, status))

    async def _request_missing_txs(self, hist, *, allow_server_not_finding_tx=False):
        # "hist" is a list of [tx_hash, tx_height] lists
        transaction_hashes = []
        for tx_hash, tx_height in hist:
            if tx_hash in self.requested_tx:
                continue
            tx = self.wallet.db.get_transaction(tx_hash)
            if tx and not isinstance(tx, PartialTransaction):
                continue  # already have complete tx
            transaction_hashes.append(tx_hash)
            self.requested_tx[tx_hash] = tx_height

        if not transaction_hashes: return
        async with OldTaskGroup() as group:
            for tx_hash in transaction_hashes:
                await group.spawn(self._get_transaction(tx_hash, allow_server_not_finding_tx=allow_server_not_finding_tx))

    async def _get_transaction(self, tx_hash, *, allow_server_not_finding_tx=False):
        self._requests_sent += 1
        try:
            async with self._network_request_semaphore:
                raw_tx = await self.interface.get_transaction(tx_hash)
        except RPCError as e:
            # most likely, "No such mempool or blockchain transaction"
            if allow_server_not_finding_tx:
                self.requested_tx.pop(tx_hash)
                return
            else:
                raise
        finally:
            self._requests_answered += 1
        # Sanity check: the server must return the tx we asked for.
        tx = Transaction(raw_tx)
        if tx_hash != tx.txid():
            raise SynchronizerFailure(f"received tx does not match expected txid ({tx_hash} != {tx.txid()})")
        tx_height = self.requested_tx.pop(tx_hash)
        self.wallet.receive_tx_callback(tx_hash, tx, tx_height)
        self.logger.info(f"received tx {tx_hash} height: {tx_height} bytes: {len(raw_tx)}")
        # callbacks
        util.trigger_callback('new_transaction', self.wallet, tx)

    async def main(self):
        self.wallet.set_up_to_date(False)
        # request missing txns, if any
        for addr in random_shuffled_copy(self.wallet.db.get_history()):
            history = self.wallet.db.get_addr_history(addr)
            # Old electrum servers returned ['*'] when all history for the address
            # was pruned. This no longer happens but may remain in old wallets.
            if history == ['*']: continue
            await self._request_missing_txs(history, allow_server_not_finding_tx=True)
        # add addresses to bootstrap
        for addr in random_shuffled_copy(self.wallet.get_addresses()):
            await self._add_address(addr)
        # main loop
        while True:
            await asyncio.sleep(0.1)
            await run_in_thread(self.wallet.synchronize)
            up_to_date = self.is_up_to_date()
            # Only act on transitions, or when new notifications arrived
            # while already up to date.
            if (up_to_date != self.wallet.is_up_to_date()
                    or up_to_date and self._processed_some_notifications):
                self._processed_some_notifications = False
                if up_to_date:
                    self._reset_request_counters()
                self.wallet.set_up_to_date(up_to_date)
                util.trigger_callback('wallet_updated', self.wallet)
class Notifier(SynchronizerBase):
    """Watch addresses. Every time the status of an address changes,
    an HTTP POST is sent to the corresponding URL.
    """
    def __init__(self, network):
        SynchronizerBase.__init__(self, network)
        # addr -> list of URLs to POST to whenever addr's status changes.
        self.watched_addresses = defaultdict(list)  # type: Dict[str, List[str]]
        self._start_watching_queue = asyncio.Queue()  # type: asyncio.Queue[Tuple[str, str]]

    async def main(self):
        # resend existing subscriptions if we were restarted
        for addr in self.watched_addresses:
            await self._add_address(addr)
        # main loop
        while True:
            addr, url = await self._start_watching_queue.get()
            self.watched_addresses[addr].append(url)
            await self._add_address(addr)

    async def start_watching_addr(self, addr: str, url: str):
        await self._start_watching_queue.put((addr, url))

    async def stop_watching_addr(self, addr: str):
        self.watched_addresses.pop(addr, None)
        # TODO blockchain.scripthash.unsubscribe

    async def _on_address_status(self, addr, status):
        if addr not in self.watched_addresses:
            return
        self.logger.info(f'new status for addr {addr}')
        headers = {'content-type': 'application/json'}
        data = {'address': addr, 'status': status}
        # Best-effort delivery: failures are logged, not retried.
        for url in self.watched_addresses[addr]:
            try:
                async with make_aiohttp_session(proxy=self.network.proxy, headers=headers) as session:
                    async with session.post(url, json=data, headers=headers) as resp:
                        await resp.text()
            except Exception as e:
                self.logger.info(repr(e))
            else:
                self.logger.info(f'Got Response for {addr}')
| |
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""Generic aggregator for model updates in federated averaging."""
import math
from tensorflow_federated.python.aggregators import differential_privacy
from tensorflow_federated.python.aggregators import distributed_dp
from tensorflow_federated.python.aggregators import encoded
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import mean
from tensorflow_federated.python.aggregators import quantile_estimation
from tensorflow_federated.python.aggregators import robust
from tensorflow_federated.python.aggregators import secure
from tensorflow_federated.python.learning import debug_measurements
def _default_zeroing(
    inner_factory: factory.AggregationFactory,
    secure_estimation: bool = False) -> factory.AggregationFactory:
  """The default adaptive zeroing wrapper."""
  # The quantile estimate adapts very quickly to a value somewhat higher than
  # the highest values seen so far.
  zeroing_norm = quantile_estimation.PrivateQuantileEstimationProcess.no_noise(
      initial_estimate=10.0,
      target_quantile=0.98,
      learning_rate=math.log(10.0),
      multiplier=2.0,
      increment=1.0,
      secure_estimation=secure_estimation)
  if not secure_estimation:
    return robust.zeroing_factory(zeroing_norm, inner_factory)
  # In secure mode, the count of zeroed values is also summed securely.
  return robust.zeroing_factory(
      zeroing_norm,
      inner_factory,
      zeroed_count_sum_factory=secure.SecureSumFactory(
          upper_bound_threshold=1, lower_bound_threshold=0))
def _default_clipping(
    inner_factory: factory.AggregationFactory,
    secure_estimation: bool = False) -> factory.AggregationFactory:
  """The default adaptive clipping wrapper."""
  # The quantile estimate adapts relatively quickly to a moderately high norm.
  clipping_norm = quantile_estimation.PrivateQuantileEstimationProcess.no_noise(
      initial_estimate=1.0,
      target_quantile=0.8,
      learning_rate=0.2,
      secure_estimation=secure_estimation)
  if not secure_estimation:
    return robust.clipping_factory(clipping_norm, inner_factory)
  # In secure mode, the count of clipped values is also summed securely.
  return robust.clipping_factory(
      clipping_norm,
      inner_factory,
      clipped_count_sum_factory=secure.SecureSumFactory(
          upper_bound_threshold=1, lower_bound_threshold=0))
def robust_aggregator(
    *,
    zeroing: bool = True,
    clipping: bool = True,
    weighted: bool = True,
    add_debug_measurements: bool = False,
) -> factory.AggregationFactory:
  """Creates a mean aggregator with adaptive zeroing and clipping.

  Extremely large values are zeroed out for robustness to data corruption on
  clients, and values are clipped in the L2 norm to a moderately high norm for
  robustness to outliers. See `tff.aggregators.clipping_factory` and
  `tff.aggregators.zeroing_factory` for details on clipping and zeroing, and
  `tff.aggregators.PrivateQuantileEstimationProcess` for the quantile-based
  adaptive algorithm.

  Args:
    zeroing: Whether to enable adaptive zeroing for data corruption mitigation.
    clipping: Whether to enable adaptive clipping in the L2 norm for
      robustness.
    weighted: Whether the mean is weighted (vs. unweighted).
    add_debug_measurements: Whether to add measurements suitable for debugging
      learning algorithms; see `tff.learning.add_debug_measurements`.

  Returns:
    A `tff.aggregators.AggregationFactory`.
  """
  if weighted:
    aggregation_factory = mean.MeanFactory()
  else:
    aggregation_factory = mean.UnweightedMeanFactory()
  if add_debug_measurements:
    aggregation_factory = debug_measurements.add_debug_measurements(
        aggregation_factory)
  # Wrap clipping first, then zeroing, so zeroing is the outermost layer.
  if clipping:
    aggregation_factory = _default_clipping(aggregation_factory)
  if zeroing:
    aggregation_factory = _default_zeroing(aggregation_factory)
  return aggregation_factory
def dp_aggregator(noise_multiplier: float,
                  clients_per_round: float,
                  zeroing: bool = True) -> factory.UnweightedAggregationFactory:
  """Creates an aggregator with adaptive zeroing and differential privacy.

  Extremely large values are zeroed out for robustness to data corruption on
  clients; model updates are then adaptively clipped and perturbed with
  Gaussian noise for differentially private learning. For the DP algorithm see
  McMahan et al. (2017) https://arxiv.org/abs/1710.06963; the adaptive
  clipping uses the geometric method of Thakkar et al. (2019)
  https://arxiv.org/abs/1905.03871.

  Args:
    noise_multiplier: A float specifying the noise multiplier for the Gaussian
      mechanism for model updates. A value of 1.0 or higher may be needed for
      meaningful privacy. See the papers above to compute an (epsilon, delta)
      privacy guarantee.
    clients_per_round: A float specifying the expected number of clients per
      round. Must be positive.
    zeroing: Whether to enable adaptive zeroing for data corruption mitigation.

  Returns:
    A `tff.aggregators.UnweightedAggregationFactory`.
  """
  dp_factory = (
      differential_privacy.DifferentiallyPrivateFactory.gaussian_adaptive(
          noise_multiplier, clients_per_round))
  return _default_zeroing(dp_factory) if zeroing else dp_factory
def compression_aggregator(
    *,
    zeroing: bool = True,
    clipping: bool = True,
    weighted: bool = True,
    add_debug_measurements: bool = False,
) -> factory.AggregationFactory:
  """Creates a mean aggregator with compression plus zeroing and clipping.

  Extremely large values are zeroed out for robustness to data corruption on
  clients, and values are clipped in the L2 norm to a moderately high norm for
  robustness to outliers. After weighting in mean, the weighted values are
  uniformly quantized to reduce the size of the model update communicated from
  clients to the server; see Suresh et al. (2017)
  http://proceedings.mlr.press/v70/suresh17a/suresh17a.pdf. The default
  configuration is chosen so that compression does not have an adverse effect
  on trained model quality in typical tasks.

  Args:
    zeroing: Whether to enable adaptive zeroing for data corruption mitigation.
    clipping: Whether to enable adaptive clipping in the L2 norm for
      robustness. Note this clipping is performed prior to the per-coordinate
      clipping required for quantization.
    weighted: Whether the mean is weighted (vs. unweighted).
    add_debug_measurements: Whether to add measurements suitable for debugging
      learning algorithms; see `tff.learning.add_debug_measurements`.

  Returns:
    A `tff.aggregators.AggregationFactory`.
  """
  compression_factory = encoded.EncodedSumFactory.quantize_above_threshold(
      quantization_bits=8, threshold=20000)
  if weighted:
    aggregation_factory = mean.MeanFactory(compression_factory)
  else:
    aggregation_factory = mean.UnweightedMeanFactory(compression_factory)
  if add_debug_measurements:
    aggregation_factory = debug_measurements.add_debug_measurements(
        aggregation_factory)
  # Wrap clipping first, then zeroing, so zeroing is the outermost layer.
  if clipping:
    aggregation_factory = _default_clipping(aggregation_factory)
  if zeroing:
    aggregation_factory = _default_zeroing(aggregation_factory)
  return aggregation_factory
def secure_aggregator(
    *,
    zeroing: bool = True,
    clipping: bool = True,
    weighted: bool = True,
) -> factory.AggregationFactory:
  """Creates a secure-summation mean aggregator with zeroing and clipping.

  Extremely large values are zeroed out for robustness to data corruption on
  clients, and values are clipped to a moderately high norm for robustness to
  outliers. After weighting in mean, the weighted values are summed using a
  cryptographic protocol so that the server cannot see individual updates
  until a sufficient number have been added together; see Bonawitz et al.
  (2017) https://dl.acm.org/doi/abs/10.1145/3133956.3133982. In TFF this is
  realized via the `tff.federated_secure_sum_bitwidth` operator.

  Args:
    zeroing: Whether to enable adaptive zeroing for data corruption mitigation.
    clipping: Whether to enable adaptive clipping in the L2 norm for
      robustness. Note this clipping is performed prior to the per-coordinate
      clipping required for secure aggregation.
    weighted: Whether the mean is weighted (vs. unweighted).

  Returns:
    A `tff.aggregators.AggregationFactory`.
  """
  # Secure-sum bound for the values, adapted toward the 0.95 quantile of
  # observed norms.
  secure_clip_bound = (
      quantile_estimation.PrivateQuantileEstimationProcess.no_noise(
          initial_estimate=50.0,
          target_quantile=0.95,
          learning_rate=1.0,
          multiplier=2.0,
          secure_estimation=True))
  value_sum_factory = secure.SecureSumFactory(secure_clip_bound)
  if weighted:
    aggregation_factory = mean.MeanFactory(
        value_sum_factory=value_sum_factory,
        # Use a power of 2 minus one to more accurately encode floating dtypes
        # that actually contain integer values. 2 ^ 20 gives us approximately a
        # range of [0, 1 million]. Existing use cases have the weights either
        # all ones, or a variant of number of examples processed locally.
        weight_sum_factory=secure.SecureSumFactory(
            upper_bound_threshold=float(2**20 - 1), lower_bound_threshold=0.0))
  else:
    aggregation_factory = mean.UnweightedMeanFactory(
        value_sum_factory=value_sum_factory,
        count_sum_factory=secure.SecureSumFactory(
            upper_bound_threshold=1, lower_bound_threshold=0))
  # Wrap clipping first, then zeroing, so zeroing is the outermost layer;
  # both use secure estimation here.
  if clipping:
    aggregation_factory = _default_clipping(
        aggregation_factory, secure_estimation=True)
  if zeroing:
    aggregation_factory = _default_zeroing(
        aggregation_factory, secure_estimation=True)
  return aggregation_factory
def ddp_secure_aggregator(
    noise_multiplier: float,
    expected_clients_per_round: int,
    bits: int = 20,
    zeroing: bool = True,
    rotation_type: str = 'hd') -> factory.UnweightedAggregationFactory:
  """Creates aggregator with adaptive zeroing and distributed DP.

  Zeroes out extremely large values for robustness to data corruption on
  clients, and performs distributed DP (compression, discrete noising, and
  SecAgg) with adaptive clipping for differentially private learning. For
  details of the two main distributed DP algorithms see
  https://arxiv.org/pdf/2102.06387
  or https://arxiv.org/pdf/2110.04995.pdf. The adaptive clipping uses the
  geometric method described in https://arxiv.org/abs/1905.03871.

  Args:
    noise_multiplier: A float specifying the noise multiplier (with respect to
      the initial L2 clipping) for the distributed DP mechanism for model
      updates. A value of 1.0 or higher may be needed for meaningful privacy.
    expected_clients_per_round: An integer specifying the expected number of
      clients per round. Must be positive.
    bits: An integer specifying the bit-width for the aggregation. Note that
      this is for the noisy, quantized aggregate at the server and thus should
      account for the `expected_clients_per_round`. Must be in the inclusive
      range of [1, 22]. This is set to 20 bits by default, and it dictates the
      computational and communication efficiency of Secure Aggregation. Setting
      it to less than 20 bits should work fine for most cases. For instance, for
      an expected number of securely aggregated client updates of 100, 12 bits
      should be enough, and for an expected number of securely aggregated client
      updates of 1000, 16 bits should be enough.
    zeroing: A bool indicating whether to enable adaptive zeroing for data
      corruption mitigation. Defaults to `True`.
    rotation_type: A string indicating what rotation to use for distributed DP.
      Valid options are 'hd' (Hadamard transform) and 'dft' (discrete Fourier
      transform). Defaults to `hd`.

  Returns:
    A `tff.aggregators.UnweightedAggregationFactory`.

  Raises:
    ValueError: If `expected_clients_per_round` is not positive, `bits` is
      outside the inclusive range [1, 22], or `rotation_type` is not one of
      'hd' or 'dft'.
  """
  # Enforce the documented preconditions up front so misconfigurations fail
  # immediately with a clear message rather than deep inside the DP factory.
  if expected_clients_per_round <= 0:
    raise ValueError('`expected_clients_per_round` must be positive, got '
                     f'{expected_clients_per_round}.')
  if not 1 <= bits <= 22:
    raise ValueError(
        f'`bits` must be in the inclusive range [1, 22], got {bits}.')
  if rotation_type not in ('hd', 'dft'):
    raise ValueError(
        f"`rotation_type` must be 'hd' or 'dft', got {rotation_type!r}.")
  agg_factory = distributed_dp.DistributedDpSumFactory(
      noise_multiplier=noise_multiplier,
      expected_clients_per_round=expected_clients_per_round,
      bits=bits,
      l2_clip=0.1,
      mechanism='distributed_skellam',
      rotation_type=rotation_type,
      auto_l2_clip=True)
  # Divide the secure sum by a securely-aggregated client count.
  agg_factory = mean.UnweightedMeanFactory(
      value_sum_factory=agg_factory,
      count_sum_factory=secure.SecureSumFactory(
          upper_bound_threshold=1, lower_bound_threshold=0))
  if zeroing:
    agg_factory = _default_zeroing(agg_factory, secure_estimation=True)
  return agg_factory
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WT5 tasks."""
import functools
from . import metrics
from . import postprocessors
from . import preprocessors
import seqio
from t5.data import get_default_vocabulary
from t5.data import glue_utils
from t5.data import postprocessors as t5_postprocessors
from t5.data import preprocessors as t5_preprocessors
from t5.evaluation import metrics as t5_metrics
import tensorflow_datasets as tfds
# Shorthand for the global seqio task registry used throughout this module.
TaskRegistry = seqio.TaskRegistry

# Output features shared by every task below: tokenized inputs/targets using
# the default T5 vocabulary, with EOS appended.
DEFAULT_OUTPUT_FEATURES = {
    "inputs":
        seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True),
    "targets":
        seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True)
}
# ======================== CoS-E Corpus Task ==================================
# Full CoS-E: predict the answer together with an abstractive explanation.
TaskRegistry.add(
    "cos_e_v001",
    source=seqio.TfdsDataSource(tfds_name="cos_e:0.0.1"),
    preprocessors=[
        preprocessors.cos_e,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# CoS-E with no explanations, and modified prefixes like e-SNLI.
TaskRegistry.add(
    "cos_e_v001_0_expln_like_esnli",
    source=seqio.TfdsDataSource(tfds_name="cos_e:0.0.1"),
    preprocessors=[
        functools.partial(
            preprocessors.cos_e,
            prefix="nli",
            question_prefix="premise:",
            drop_explanations=True),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# Data-efficiency ablations: train on only the first n examples with
# explanations ("take{n}") or on the remainder with explanations dropped
# ("skip{n}"). Metrics are left empty; these are train-only tasks.
n_cos_e_explanations = [5000, 2000, 1000, 500, 200, 100]
for n in n_cos_e_explanations:
  # Take n in train.
  TaskRegistry.add(
      "cos_e_explanations_take{}_v001".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="cos_e:0.0.1", splits={"train": "train[0:{}]".format(n)}),
      preprocessors=[
          preprocessors.cos_e,
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.abstractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])
  # Skip n in train.
  TaskRegistry.add(
      "cos_e_labels_skip{}_v001".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="cos_e:0.0.1", splits={"train": "train[{}:]".format(n)}),
      preprocessors=[
          functools.partial(
              preprocessors.cos_e, prefix="cos_e", drop_explanations=True),
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.abstractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])

# Note: cos_e has a validation set (we use the dev set for validation), but no
# test set.
TaskRegistry.add(
    "cos_e_eval_v001",
    source=seqio.TfdsDataSource(tfds_name="cos_e:0.0.1", splits=["validation"]),
    preprocessors=[
        preprocessors.cos_e,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# ============== Zero Shot Transfer Tasks for eSNLI and CoS-E ==================
# Note: cos_e has a validation set (we use the dev set for validation), but no
# test set.
# CoS-E evaluation, with modified prefixes like e-SNLI.
TaskRegistry.add(
    "cos_e_eval_v001_like_esnli",
    source=seqio.TfdsDataSource(tfds_name="cos_e:0.0.1", splits=["validation"]),
    preprocessors=[
        functools.partial(
            preprocessors.cos_e,
            prefix="explain nli",
            question_prefix="premise:"),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])
# e-SNLI variants whose inputs list the candidate labels ("choices") inline.
TaskRegistry.add(
    "esnli_v010_with_choices",
    source=seqio.TfdsDataSource(tfds_name="esnli:0.1.0"),
    preprocessors=[
        functools.partial(preprocessors.esnli, add_choices=True),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# e-SNLI with no explanations.
TaskRegistry.add(
    "esnli_v010_0_expln_with_choices",
    source=seqio.TfdsDataSource(tfds_name="esnli:0.1.0"),
    preprocessors=[
        functools.partial(
            preprocessors.esnli,
            prefix="nli",
            drop_explanations=True,
            add_choices=True),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# ======================== e-SNLI Corpus Task ==================================
TaskRegistry.add(
    "esnli_v010",
    source=seqio.TfdsDataSource(tfds_name="esnli:0.1.0"),
    preprocessors=[
        preprocessors.esnli,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# e-SNLI with no explanations.
TaskRegistry.add(
    "esnli_v010_0_expln",
    source=seqio.TfdsDataSource(tfds_name="esnli:0.1.0"),
    preprocessors=[
        functools.partial(
            preprocessors.esnli, prefix="nli", drop_explanations=True),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# Evaluation on both validation and test splits.
TaskRegistry.add(
    "esnli_eval_v010",
    source=seqio.TfdsDataSource(
        tfds_name="esnli:0.1.0", splits=["validation", "test"]),
    preprocessors=[
        preprocessors.esnli,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.abstractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.esnli_metric])

# Data-efficiency ablations, analogous to the CoS-E take/skip tasks above.
n_esnli_explanations = [50000, 20000, 10000, 5000, 2000, 1000, 500, 200, 100]
for n in n_esnli_explanations:
  # Take n in train.
  TaskRegistry.add(
      "esnli_explanations_take{}_v010".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="esnli:0.1.0", splits={"train": "train[0:{}]".format(n)}),
      preprocessors=[
          preprocessors.esnli,
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.abstractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])
  # Skip n in train.
  TaskRegistry.add(
      "esnli_labels_skip{}_v010".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="esnli:0.1.0", splits={"train": "train[{}:]".format(n)}),
      preprocessors=[
          functools.partial(
              preprocessors.esnli, prefix="nli", drop_explanations=True),
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.abstractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])
# ======================== MNLI (GLUE) Tasks ==================================
# Reuse the GLUE builder config for the MNLI label names.
mnli_config = tfds.text.glue.Glue.builder_configs["mnli"]
# pylint: disable=protected-access
TaskRegistry.add(
    "mnli_v002",
    source=seqio.TfdsDataSource(tfds_name="glue/mnli:1.0.0"),
    preprocessors=[
        functools.partial(
            t5_preprocessors.glue,
            benchmark_name="nli",
            label_names=mnli_config.label_classes),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    metric_fns=glue_utils.GLUE_METRICS["mnli"],
    output_features=DEFAULT_OUTPUT_FEATURES,
    postprocess_fn=glue_utils.get_glue_postprocess_fn(mnli_config),
)
# Zero-shot explanation evaluation on both MNLI eval sets.
for mnli_eval_set in ("matched", "mismatched"):
  TaskRegistry.add(
      "mnli_explain_eval_%s_v002" % mnli_eval_set,
      source=seqio.TfdsDataSource(tfds_name="glue/mnli_%s:1.0.0" %
                                  mnli_eval_set),
      preprocessors=[
          functools.partial(
              t5_preprocessors.glue,
              benchmark_name="explain nli",
              label_names=mnli_config.label_classes),
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      metric_fns=[metrics.esnli_metric],
      output_features=DEFAULT_OUTPUT_FEATURES,
      postprocess_fn=postprocessors.abstractive_explanations,
  )
# pylint: enable=protected-access
# ======================== Movie Rationales ======================
# Sentiment classification with extractive rationales (evidence spans).
TaskRegistry.add(
    "movie_rationales_v010",
    source=seqio.TfdsDataSource(tfds_name="movie_rationales:0.1.0"),
    preprocessors=[
        preprocessors.extractive_explanations,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.extractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.extractive_explanations_metric])

# Label-only variant: explanations dropped, plain "sentiment" prefix.
TaskRegistry.add(
    "movie_rationales_v010_no_expl",
    source=seqio.TfdsDataSource(tfds_name="movie_rationales:0.1.0"),
    preprocessors=[
        functools.partial(
            preprocessors.extractive_explanations,
            drop_explanations=True,
            prefix="sentiment"),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.extractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[])

# Data-efficiency ablations, analogous to the CoS-E/e-SNLI take/skip tasks.
n_movie_explanations = [1000, 500, 200, 100]
for n in n_movie_explanations:
  # Take n in train.
  TaskRegistry.add(
      "movie_rationales_explanations_take{}_v010".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="movie_rationales:0.1.0",
          splits={"train": "train[0:{}]".format(n)}),
      preprocessors=[
          preprocessors.extractive_explanations,
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.extractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])
  # Skip n in train.
  TaskRegistry.add(
      "movie_rationales_labels_skip{}_v010".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="movie_rationales:0.1.0",
          splits={"train": "train[{}:]".format(n)}),
      preprocessors=[
          functools.partial(
              preprocessors.extractive_explanations, drop_explanations=True),
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.extractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])

TaskRegistry.add(
    "movie_rationales_eval_v010",
    source=seqio.TfdsDataSource(
        tfds_name="movie_rationales:0.1.0", splits=["validation", "test"]),
    preprocessors=[
        preprocessors.extractive_explanations,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.extractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.extractive_explanations_metric])
# ======================= IMDB Movie Reviews =====================
# Plain sentiment classification (no explanations in the data).
TaskRegistry.add(
    "imdb_reviews_v100",
    source=seqio.TfdsDataSource(
        tfds_name="imdb_reviews:1.0.0", splits=["train", "test"]),
    preprocessors=[
        preprocessors.imdb_reviews,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=functools.partial(
        t5_postprocessors.string_label_to_class_id,
        label_classes=["negative", "positive"]),
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[t5_metrics.accuracy])

# Zero-shot explanation eval: ask for explanations on IMDB's test split.
TaskRegistry.add(
    "imdb_reviews_eval_v100",
    source=seqio.TfdsDataSource(
        tfds_name="imdb_reviews:1.0.0", splits=["test"]),
    preprocessors=[
        functools.partial(
            preprocessors.imdb_reviews, prefix="explain sentiment"),
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.extractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.extractive_explanations_metric],
)
# ======================== Amazon Reviews ======================
# One train task and one zero-shot explanation eval task per product category.
# There is no official validation split, so the first 10% of train is held out.
amazon_review_categories = [
    b.name for b in tfds.structured.AmazonUSReviews.builder_configs.values()]
for c in amazon_review_categories:
  TaskRegistry.add(
      "amazon_reviews_{}_v010".format(c.lower()),
      source=seqio.TfdsDataSource(
          tfds_name="amazon_us_reviews/{}:0.1.0".format(c),
          splits={
              "train": "train[10%:]",
              "validation": "train[:10%]"
          }),
      preprocessors=[
          preprocessors.amazon_reviews,
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=functools.partial(
          t5_postprocessors.string_label_to_class_id,
          label_classes=["negative", "positive"]),
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[t5_metrics.accuracy])
  TaskRegistry.add(
      "amazon_reviews_{}_eval_v010".format(c.lower()),
      source=seqio.TfdsDataSource(
          tfds_name="amazon_us_reviews/{}:0.1.0".format(c),
          splits={"validation": "train[:10%]"}),
      preprocessors=[
          functools.partial(
              preprocessors.amazon_reviews, prefix="explain sentiment"),
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.extractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[metrics.extractive_explanations_metric])
# ======================== Eraser MultiRC ======================
# Reading comprehension with extractive rationales.
TaskRegistry.add(
    "eraser_multi_rc_v011",
    source=seqio.TfdsDataSource(tfds_name="eraser_multi_rc:0.1.1"),
    preprocessors=[
        preprocessors.eraser_multi_rc,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.extractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.extractive_explanations_metric])

# Data-efficiency ablations, analogous to the take/skip tasks above.
n_multi_rc_explanations = [10000, 5000, 2000, 1000, 500, 200, 100]
for n in n_multi_rc_explanations:
  # Take n in train.
  TaskRegistry.add(
      "eraser_multi_rc_explanations_take{}_v011".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="eraser_multi_rc:0.1.1",
          splits={"train": "train[0:{}]".format(n)}),
      preprocessors=[
          preprocessors.eraser_multi_rc,
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.extractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])
  # Skip n in train.
  TaskRegistry.add(
      "eraser_multi_rc_labels_skip{}_v011".format(n),
      source=seqio.TfdsDataSource(
          tfds_name="eraser_multi_rc:0.1.1",
          splits={"train": "train[{}:]".format(n)}),
      preprocessors=[
          functools.partial(
              preprocessors.eraser_multi_rc, drop_explanations=True),
          seqio.preprocessors.tokenize,
          seqio.CacheDatasetPlaceholder(),
          seqio.preprocessors.append_eos_after_trim,
      ],
      postprocess_fn=postprocessors.extractive_explanations,
      output_features=DEFAULT_OUTPUT_FEATURES,
      metric_fns=[])

TaskRegistry.add(
    "eraser_multi_rc_eval_v011",
    source=seqio.TfdsDataSource(
        tfds_name="eraser_multi_rc:0.1.1", splits=["validation", "test"]),
    preprocessors=[
        preprocessors.eraser_multi_rc,
        seqio.preprocessors.tokenize,
        seqio.CacheDatasetPlaceholder(),
        seqio.preprocessors.append_eos_after_trim,
    ],
    postprocess_fn=postprocessors.extractive_explanations,
    output_features=DEFAULT_OUTPUT_FEATURES,
    metric_fns=[metrics.extractive_explanations_metric])
| |
from importlib import import_module
import inspect
import os
import re
from django import template
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.core.exceptions import ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.utils.decorators import method_decorator
from django.utils._os import upath
from django.utils import six
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
# Exclude methods starting with these strings from documentation
# (private helpers and mutators are not useful in the model docs).
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class BaseAdminDocsView(TemplateView):
    """
    Base view for admindocs views: requires a staff user and docutils.
    """
    @method_decorator(staff_member_required)
    def dispatch(self, *args, **kwargs):
        if utils.docutils_is_available:
            return super(BaseAdminDocsView, self).dispatch(*args, **kwargs)
        # Display an error message for people without docutils
        self.template_name = 'admin_doc/missing_docutils.html'
        return self.render_to_response(admin.site.each_context())

    def get_context_data(self, **kwargs):
        kwargs['root_path'] = urlresolvers.reverse('admin:index')
        kwargs.update(admin.site.each_context())
        return super(BaseAdminDocsView, self).get_context_data(**kwargs)
class BookmarkletsView(BaseAdminDocsView):
    """Render the bookmarklets page with the absolute admin URL."""
    template_name = 'admin_doc/bookmarklets.html'

    def get_context_data(self, **kwargs):
        context = super(BookmarkletsView, self).get_context_data(**kwargs)
        request = self.request
        context['admin_url'] = "%s://%s%s" % (
            request.scheme, request.get_host(), context['root_path'])
        return context
class TemplateTagIndexView(BaseAdminDocsView):
    """List every registered template tag with its parsed docstring."""
    template_name = 'admin_doc/template_tag_index.html'

    def get_context_data(self, **kwargs):
        load_all_installed_template_libraries()
        tags = []
        app_libs = list(six.iteritems(template.libraries))
        builtin_libs = [(None, lib) for lib in template.builtins]
        for module_name, library in builtin_libs + app_libs:
            for tag_name, tag_func in library.tags.items():
                title, body, metadata = utils.parse_docstring(tag_func.__doc__)
                reference_context = _('tag:') + tag_name
                if title:
                    title = utils.parse_rst(title, 'tag', reference_context)
                if body:
                    body = utils.parse_rst(body, 'tag', reference_context)
                for key in metadata:
                    metadata[key] = utils.parse_rst(metadata[key], 'tag', reference_context)
                # Builtin libraries have no importable module name to show.
                library_name = '' if library in template.builtins else module_name.split('.')[-1]
                tags.append({
                    'name': tag_name,
                    'title': title,
                    'body': body,
                    'meta': metadata,
                    'library': library_name,
                })
        kwargs.update({'tags': tags})
        return super(TemplateTagIndexView, self).get_context_data(**kwargs)
class TemplateFilterIndexView(BaseAdminDocsView):
    """List every registered template filter with its parsed docstring."""
    template_name = 'admin_doc/template_filter_index.html'

    def get_context_data(self, **kwargs):
        load_all_installed_template_libraries()
        filters = []
        app_libs = list(six.iteritems(template.libraries))
        builtin_libs = [(None, lib) for lib in template.builtins]
        for module_name, library in builtin_libs + app_libs:
            for filter_name, filter_func in library.filters.items():
                title, body, metadata = utils.parse_docstring(filter_func.__doc__)
                reference_context = _('filter:') + filter_name
                if title:
                    title = utils.parse_rst(title, 'filter', reference_context)
                if body:
                    body = utils.parse_rst(body, 'filter', reference_context)
                for key in metadata:
                    metadata[key] = utils.parse_rst(metadata[key], 'filter', reference_context)
                # Builtin libraries have no importable module name to show.
                library_name = '' if library in template.builtins else module_name.split('.')[-1]
                filters.append({
                    'name': filter_name,
                    'title': title,
                    'body': body,
                    'meta': metadata,
                    'library': library_name,
                })
        kwargs.update({'filters': filters})
        return super(TemplateFilterIndexView, self).get_context_data(**kwargs)
class ViewIndexView(BaseAdminDocsView):
    """List every view reachable from the root URLconf."""
    template_name = 'admin_doc/view_index.html'

    def get_context_data(self, **kwargs):
        urlconf = import_module(settings.ROOT_URLCONF)
        views = []
        for (func, regex, namespace, name) in extract_views_from_urlpatterns(urlconf.urlpatterns):
            func_name = getattr(func, '__name__', func.__class__.__name__)
            views.append({
                'full_name': '%s.%s' % (func.__module__, func_name),
                'url': simplify_regex(regex),
                'url_name': ':'.join((namespace or []) + ([name] if name else [])),
                'namespace': ':'.join(namespace or []),
                'name': name,
            })
        kwargs['views'] = views
        return super(ViewIndexView, self).get_context_data(**kwargs)
class ViewDetailView(BaseAdminDocsView):
    """Show the parsed docstring (summary, body, metadata) of a single view."""
    template_name = 'admin_doc/view_detail.html'

    def get_context_data(self, **kwargs):
        view = self.kwargs['view']
        mod, func = urlresolvers.get_mod_func(view)
        try:
            view_func = getattr(import_module(mod), func)
        except (ImportError, AttributeError):
            raise Http404
        title, body, metadata = utils.parse_docstring(view_func.__doc__)
        if title:
            title = utils.parse_rst(title, 'view', _('view:') + view)
        if body:
            body = utils.parse_rst(body, 'view', _('view:') + view)
        for key in metadata:
            # Fix: use the 'view' default reference context, consistent with
            # the title/body parsing above. The previous 'model' context was a
            # copy-paste slip that made bare doc references in the metadata
            # resolve as model links.
            metadata[key] = utils.parse_rst(metadata[key], 'view', _('view:') + view)
        kwargs.update({
            'name': view,
            'summary': title,
            'body': body,
            'meta': metadata,
        })
        return super(ViewDetailView, self).get_context_data(**kwargs)
class ModelIndexView(BaseAdminDocsView):
    """List the _meta options of every installed model."""
    template_name = 'admin_doc/model_index.html'

    def get_context_data(self, **kwargs):
        kwargs['models'] = [model._meta for model in apps.get_models()]
        return super(ModelIndexView, self).get_context_data(**kwargs)
class ModelDetailView(BaseAdminDocsView):
    """Admin-docs page documenting one model: its fields, M2M accessors,
    no-argument methods, and reverse relations.
    """
    template_name = 'admin_doc/model_detail.html'
    def get_context_data(self, **kwargs):
        # Get the model class.
        try:
            app_config = apps.get_app_config(self.kwargs['app_label'])
        except LookupError:
            raise Http404(_("App %(app_label)r not found") % self.kwargs)
        try:
            model = app_config.get_model(self.kwargs['model_name'])
        except LookupError:
            raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs)
        opts = model._meta
        # Gather fields/field descriptions.
        fields = []
        for field in opts.fields:
            # ForeignKey is a special case since the field will actually be a
            # descriptor that returns the other object
            if isinstance(field, models.ForeignKey):
                data_type = field.rel.to.__name__
                app_label = field.rel.to._meta.app_label
                verbose = utils.parse_rst(
                    (_("the related `%(app_label)s.%(data_type)s` object") % {
                        'app_label': app_label, 'data_type': data_type,
                    }),
                    'model',
                    _('model:') + data_type,
                )
            else:
                data_type = get_readable_field_data_type(field)
                verbose = field.verbose_name
            fields.append({
                'name': field.name,
                'data_type': data_type,
                'verbose': verbose,
                'help_text': field.help_text,
            })
        # Gather many-to-many fields.
        for field in opts.many_to_many:
            data_type = field.rel.to.__name__
            app_label = field.rel.to._meta.app_label
            verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
            # Document the two template-usable accessors: `.all` and `.count`.
            fields.append({
                'name': "%s.all" % field.name,
                "data_type": 'List',
                'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
            })
            fields.append({
                'name': "%s.count" % field.name,
                'data_type': 'Integer',
                'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
            })
        # Gather model methods.
        for func_name, func in model.__dict__.items():
            # Only plain functions taking just `self` (callable from templates).
            if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
                # StopIteration is (ab)used here as a labeled break: skip
                # any method whose name starts with an excluded prefix.
                try:
                    for exclude in MODEL_METHODS_EXCLUDE:
                        if func_name.startswith(exclude):
                            raise StopIteration
                except StopIteration:
                    continue
                verbose = func.__doc__
                if verbose:
                    verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
                fields.append({
                    'name': func_name,
                    'data_type': get_return_data_type(func_name),
                    'verbose': verbose,
                })
        # Gather related objects
        for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
            verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
            accessor = rel.get_accessor_name()
            fields.append({
                'name': "%s.all" % accessor,
                'data_type': 'List',
                'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
            })
            fields.append({
                'name': "%s.count" % accessor,
                'data_type': 'Integer',
                'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
            })
        kwargs.update({
            'name': '%s.%s' % (opts.app_label, opts.object_name),
            # Translators: %s is an object type name
            'summary': _("Attributes on %s objects") % opts.object_name,
            'description': model.__doc__,
            'fields': fields,
        })
        return super(ModelDetailView, self).get_context_data(**kwargs)
class TemplateDetailView(BaseAdminDocsView):
    """Admin-docs page showing, for each template directory, whether the
    requested template exists there and (lazily) what it contains.
    """
    template_name = 'admin_doc/template_detail.html'

    @staticmethod
    def _read_template_file(path):
        # Read the template's contents, closing the handle (the original
        # leaked it), or return '' when the file does not exist.
        if not os.path.exists(path):
            return ''
        with open(path) as fh:
            return fh.read()

    def get_context_data(self, **kwargs):
        template = self.kwargs['template']
        templates = []
        for dir in settings.TEMPLATE_DIRS:
            template_file = os.path.join(dir, template)
            templates.append({
                'file': template_file,
                'exists': os.path.exists(template_file),
                # Bind the current path as a default argument: a bare
                # closure would late-bind `template_file`, so every row's
                # 'contents' callable would read the file from the *last*
                # directory in TEMPLATE_DIRS.
                'contents': lambda path=template_file: self._read_template_file(path),
                'order': list(settings.TEMPLATE_DIRS).index(dir),
            })
        kwargs.update({
            'name': template,
            'templates': templates,
        })
        return super(TemplateDetailView, self).get_context_data(**kwargs)
####################
# Helper functions #
####################
def load_all_installed_template_libraries():
    """Import every template tag library so its tags/filters get registered."""
    # Load/register all template tag libraries from installed apps.
    for module_name in template.get_templatetags_modules():
        mod = import_module(module_name)
        try:
            # Every importable .py file next to the templatetags module is
            # treated as a candidate library (skip dunder/private files).
            libraries = [
                os.path.splitext(p)[0]
                for p in os.listdir(os.path.dirname(upath(mod.__file__)))
                if p.endswith('.py') and p[0].isalpha()
            ]
        except OSError:
            libraries = []
        for library_name in libraries:
            try:
                template.get_library(library_name)
            except template.InvalidTemplateLibrary:
                # Importing is done purely for its registration side effect;
                # broken libraries are silently skipped.
                pass
def get_return_data_type(func_name):
    """Guess a human-readable return type from a method's name.

    ``get_*_list`` maps to 'List' and ``get_*_count`` to 'Integer';
    any other name yields an empty string.
    """
    if not func_name.startswith('get_'):
        return ''
    for suffix, data_type in (('_list', 'List'), ('_count', 'Integer')):
        if func_name.endswith(suffix):
            return data_type
    return ''
def get_readable_field_data_type(field):
    """Return the human-readable description of a field's type, if any.

    A field's ``description`` may contain %-style placeholders, which are
    interpolated against the field's own attribute dict before output.
    """
    return field.description % vars(field)
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a four-tuple:
    (view_func, regex, namespace_list_or_None, url_name).
    (The previous docstring incorrectly described a two-tuple.)

    `base` accumulates the regex prefix and `namespace` the namespace chain
    while recursing into include()d URLconfs.
    """
    views = []
    for p in urlpatterns:
        if hasattr(p, 'url_patterns'):
            # An include(): accessing `url_patterns` may import the included
            # module, hence the ImportError guard.
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(
                patterns,
                base + p.regex.pattern,
                (namespace or []) + (p.namespace and [p.namespace] or [])
            ))
        elif hasattr(p, 'callback'):
            try:
                views.append((p.callback, base + p.regex.pattern,
                              namespace, p.name))
            except ViewDoesNotExist:
                continue
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % p)
    return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')


def simplify_regex(pattern):
    """
    Clean up urlpattern regexes into something somewhat readable by Mere Humans:
    turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "<sport_slug>/athletes/<athlete_slug>/"
    """
    # Named groups collapse to just their <name> marker; anonymous groups
    # become a generic <var> placeholder.
    pattern = named_group_matcher.sub(r'\1', pattern)
    pattern = non_named_group_matcher.sub("<var>", pattern)
    # Strip anchors and any leftover regex punctuation.
    for old, new in (('^', ''), ('$', ''), ('?', ''), ('//', '/'), ('\\', '')):
        pattern = pattern.replace(old, new)
    if pattern.startswith('/'):
        return pattern
    return '/' + pattern
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import json
import platform
import subprocess
import logging
from time import sleep
import invoke
from invoke import run, Collection
from website import settings
from utils import pip_install, bin_prefix
logging.getLogger('invoke').setLevel(logging.CRITICAL)
# gets the root path for all the scripts that rely on it
HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
CONSTRAINTS_PATH = os.path.join(HERE, 'requirements', 'constraints.txt')
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
ns = Collection()
try:
from admin import tasks as admin_tasks
ns.add_collection(Collection.from_module(admin_tasks), name='admin')
except ImportError:
pass
def task(*args, **kwargs):
    """Behaves the same way as invoke.task. Adds the task
    to the root namespace.
    """
    # Bare usage: @task applied directly to a function.
    if len(args) == 1 and callable(args[0]):
        registered = invoke.task(args[0])
        ns.add_task(registered)
        return registered

    # Parameterized usage: @task(...) must return a decorator.
    def decorator(f):
        registered = invoke.task(f, *args, **kwargs)
        ns.add_task(registered)
        return registered
    return decorator
@task
def server(host=None, port=5000, debug=True, live=False, gitlogs=False):
    """Run the app server."""
    if gitlogs:
        git_logs()
    from website.app import init_app
    # The Django settings module must be set before init_app touches any
    # Django-backed code.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
    app = init_app(set_backends=True, routes=True)
    settings.API_SERVER_PORT = port
    if live:
        # livereload mode: serve the WSGI app and reload the browser when
        # built static assets change.
        from livereload import Server
        server = Server(app.wsgi_app)
        server.watch(os.path.join(HERE, 'website', 'static', 'public'))
        server.serve(port=port)
    else:
        if settings.SECURE_MODE:
            # Serve over TLS with the configured cert/key pair.
            context = (settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
        else:
            context = None
        app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH], ssl_context=context)
@task
def git_logs():
from scripts.meta import gatherer
gatherer.main()
@task
def apiserver(port=8000, wait=True, host='127.0.0.1'):
    """Run the API server."""
    # Copy of the environment for the non-blocking Popen branch below.
    env = os.environ.copy()
    cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {}:{} --nothreading'\
        .format(sys.executable, host, port)
    if settings.SECURE_MODE:
        cmd = cmd.replace('runserver', 'runsslserver')
        cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    if wait:
        # Blocking: run in the foreground via invoke.
        return run(cmd, echo=True, pty=True)
    # Non-blocking: start the server as a child process and return the
    # Popen handle so callers (e.g. test_varnish) can kill it later.
    from subprocess import Popen
    return Popen(cmd, shell=True, env=env)
@task
def adminserver(port=8001, host='127.0.0.1'):
"""Run the Admin server."""
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
cmd = '{} python manage.py runserver {}:{} --nothreading'.format(env, host, port)
if settings.SECURE_MODE:
cmd = cmd.replace('runserver', 'runsslserver')
cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
run(cmd, echo=True, pty=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
{transaction}
Available variables:
{context}
"""
TRANSACTION_WARNING = """
*** TRANSACTION AUTOMATICALLY STARTED ***
To persist changes run 'commit()'.
Keep in mind that changing documents will lock them.
This feature can be disabled with the '--no-transaction' flag.
"""
def make_shell_context(auto_transact=True):
    """Build the name -> object mapping exposed in the interactive shell.

    When `auto_transact` is True, a transaction is opened before the
    context is returned, and re-opened after each commit()/rollback().
    """
    from modularodm import Q
    from framework.auth import User, Auth
    from framework.mongo import database
    from website.app import init_app
    from website.project.model import Node
    from website import models  # all models
    from website import settings
    import requests
    from framework.transactions import commands
    from framework.transactions import context as tcontext
    app = init_app()
    # Convenience wrappers that also restart the auto-transaction.
    def commit():
        commands.commit()
        print('Transaction committed.')
        if auto_transact:
            commands.begin()
            print('New transaction opened.')
    def rollback():
        commands.rollback()
        print('Transaction rolled back.')
        if auto_transact:
            commands.begin()
            print('New transaction opened.')
    context = {
        'transaction': tcontext.TokuTransaction,
        'start_transaction': commands.begin,
        'commit': commit,
        'rollback': rollback,
        'app': app,
        'db': database,
        'User': User,
        'Auth': Auth,
        'Node': Node,
        'Q': Q,
        'models': models,
        'run_tests': test,
        'rget': requests.get,
        'rpost': requests.post,
        'rdelete': requests.delete,
        'rput': requests.put,
        'settings': settings,
    }
    try:  # Add a fake factory for generating fake names, emails, etc.
        from faker import Factory
        fake = Factory.create()
        context['fake'] = fake
    except ImportError:
        pass
    if auto_transact:
        commands.begin()
    return context
def format_context(context):
    """Render a shell-context dict as one "name: repr(obj)" line per entry.

    Replaces the fragile `.format(**locals())` idiom, which leaked every
    local name into the format call, with explicit keyword arguments.
    """
    return '\n'.join(
        '{name}: {obj!r}'.format(name=name, obj=obj)
        for name, obj in context.items()
    )
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell(transaction=True):
    """Open an interactive shell preloaded with app models and helpers."""
    context = make_shell_context(auto_transact=transaction)
    banner = SHELL_BANNER.format(version=sys.version,
                                 context=format_context(context),
                                 transaction=TRANSACTION_WARNING if transaction else ''
                                 )
    # Prefer IPython when available, trying the old then the new embed API.
    try:
        try:
            # 0.10.x
            from IPython.Shell import IPShellEmbed
            ipshell = IPShellEmbed(banner=banner)
            ipshell(global_ns={}, local_ns=context)
        except ImportError:
            # 0.12+
            from IPython import embed
            embed(banner1=banner, user_ns=context)
        return
    except ImportError:
        pass
    # fallback to basic python shell
    code.interact(banner, local=context)
    return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
    """Run the mongod process.
    """
    if not config:
        # Fall back to the platform's conventional tokumx config location.
        # (Local renamed from `platform`, which shadowed the module import.)
        default_configs = {
            'darwin': '/usr/local/etc/tokumx.conf',  # default for homebrew install
            'linux': '/etc/tokumx.conf',
        }
        system = str(sys.platform).lower()
        config = default_configs.get(system)
    cmd = 'mongod --port {0}'.format(settings.DB_PORT)
    if config:
        cmd += ' --config {0}'.format(config)
    if daemon:
        cmd += ' --fork'
    run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run('mongo {db} --port {port}'.format(db=db, port=port), pty=True)
@task
def mongodump(path):
    """Back up the contents of the running OSF database to `path`."""
    db = settings.DB_NAME
    port = settings.DB_PORT
    # NOTE: `pty=True` was previously passed to str.format(), where it was
    # silently ignored (no {pty} placeholder exists); it has been removed.
    cmd = 'mongodump --db {db} --port {port} --out {path}'.format(
        db=db,
        port=port,
        path=path,
    )
    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)
    run(cmd, echo=True)
    print()
    print('To restore from the dumped database, run `invoke mongorestore {0}`'.format(
        os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
    """Restores the running OSF database with the contents of the database at
    the location given its argument.
    By default, the contents of the specified database are added to
    the existing database. The `--drop` option will cause the existing database
    to be dropped.
    A caveat: if you `invoke mongodump {path}`, you must restore with
    `invoke mongorestore {path}/{settings.DB_NAME}, as that's where the
    database dump will be stored.
    """
    db = settings.DB_NAME
    port = settings.DB_PORT
    # NOTE: `pty=True` was previously passed to str.format(), where it was
    # silently ignored; it has been removed.
    cmd = 'mongorestore --db {db} --port {port}'.format(
        db=db,
        port=port,
    )
    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)
    if drop:
        cmd += ' --drop'
    cmd += ' ' + path
    run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_url=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_url:
os.environ['SHAREJS_DB_URL'] = db_url
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run('node {0}'.format(share_server))
@task(aliases=['celery'])
def celery_worker(level='debug', hostname=None, beat=False):
"""Run the Celery process."""
cmd = 'celery worker -A framework.celery_tasks -l {0}'.format(level)
if hostname:
cmd = cmd + ' --hostname={}'.format(hostname)
# beat sets up a cron like scheduler, refer to website/settings
if beat:
cmd = cmd + ' --beat'
run(bin_prefix(cmd), pty=True)
@task(aliases=['beat'])
def celery_beat(level='debug', schedule=None):
"""Run the Celery process."""
# beat sets up a cron like scheduler, refer to website/settings
cmd = 'celery beat -A framework.celery_tasks -l {0} --pidfile='.format(level)
if schedule:
cmd = cmd + ' --schedule={}'.format(schedule)
run(bin_prefix(cmd), pty=True)
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run('rabbitmq-server', pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
import platform
if platform.linux_distribution()[0] == 'Ubuntu':
run('sudo service elasticsearch start')
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print('Your system is not recognized, you will have to start elasticsearch manually')
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run('curl -s -XDELETE {uri}/{index}*'.format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run('curl -s -XPUT {uri}/{index}'.format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
@task(aliases=['req'])
def requirements(base=False, addons=False, release=False, dev=False, metrics=False, quick=False):
"""Install python dependencies.
Examples:
inv requirements
inv requirements --quick
Quick requirements are, in order, addons, dev and the base requirements. You should be able to use --quick for
day to day development.
By default, base requirements will run. However, if any set of addons, release, dev, or metrics are chosen, base
will have to be mentioned explicitly in order to run. This is to remain compatible with previous usages. Release
requirements will prevent dev, metrics, and base from running.
"""
if quick:
base = True
addons = True
dev = True
if not(addons or dev or metrics):
base = True
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
run(
pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
else:
if dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
run(
pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
if metrics: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
run(
pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
if base: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(
pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = ' --verbosity={0} -s {1}'.format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module='tests/')
@task
def test_api():
"""Run the API test suite."""
test_module(module='api_tests/')
@task
def test_admin():
"""Run the Admin test suite."""
# test_module(module="admin_tests/")
module = 'admin_tests/'
module_fmt = ' '.join(module) if isinstance(module, list) else module
admin_tasks.manage('test {}'.format(module_fmt))
@task
def test_varnish():
"""Run the Varnish test suite."""
proc = apiserver(wait=False)
sleep(5)
test_module(module='api/caching/tests/test_caching.py')
proc.kill()
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
test_api()
test_admin()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_osf():
"""
Run half of the tests to help travis go faster
"""
flake()
jshint()
test_osf()
@task
def test_travis_else():
"""
Run other half of the tests to help travis go faster
"""
test_addons()
test_api()
test_admin()
karma(single=True, browsers='PhantomJS')
@task
def test_travis_varnish():
test_varnish()
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False, metrics=False):
"""Build wheels for python dependencies.
Examples:
inv wheelhouse --dev
inv wheelhouse --addons
inv wheelhouse --release
inv wheelhouse --metrics
"""
if release or addons:
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
elif metrics:
req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
requirements_file = os.path.join(path, 'requirements.txt')
if os.path.isdir(path) and os.path.isfile(requirements_file):
print('Installing requirements for {0}'.format(directory))
run(
pip_install(requirements_file, constraints_file=CONSTRAINTS_PATH),
echo=True
)
print('Finished installing addon requirements')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
    """Copy local-travis.py to local.py for every addon that ships one."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path):
            try:
                # Probe for readability, closing the handle (the bare
                # open() in the original leaked a file descriptor).
                with open(os.path.join(path, 'local-travis.py')):
                    pass
                run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
            except IOError:
                # Addon has no travis settings template; skip it.
                pass
@task
def copy_addon_settings():
    """Copy local-dist.py to local.py for addons missing local settings."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
            try:
                # Probe for readability, closing the handle (the bare
                # open() in the original leaked a file descriptor).
                with open(os.path.join(path, 'local-dist.py')):
                    pass
                run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
            except IOError:
                # Addon has no dist settings template; skip it.
                pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
# Build nodeCategories.json before building assets
build_js_config_files()
assets(dev=True, watch=False)
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
    """Rename hotfix branch to hotfix/<next-patch-version> and optionally
    finish hotfix.

    Reads the current version from the latest git tag on master and bumps
    the patch component. Fixes the "curent" typo in the status message.
    """
    print('Checking out master to calculate current version')
    run('git checkout master')
    latest_version = latest_tag_info()['current_version']
    print('Current version is: {}'.format(latest_version))
    major, minor, patch = latest_version.split('.')
    next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
    print('Bumping to next patch version: {}'.format(next_patch_version))
    print('Renaming branch...')
    new_branch_name = 'hotfix/{}'.format(next_patch_version)
    run('git checkout {}'.format(name), echo=True)
    run('git branch -m {}'.format(new_branch_name), echo=True)
    if finish:
        run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
    if push:
        run('git push origin master', echo=True)
        run('git push --tags', echo=True)
        run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
    """Parse `git describe` output for the most recent tag.

    Returns a dict with keys 'dirty' (present only on a dirty tree),
    'commit_sha', 'distance_to_latest_tag', and 'current_version'.
    Raises subprocess.CalledProcessError when git fails.
    """
    try:
        # get info about the latest tag in git
        describe_out = subprocess.check_output([
            'git',
            'describe',
            '--dirty',
            '--tags',
            '--long',
            '--abbrev=40'
        ], stderr=subprocess.STDOUT
        ).decode().split('-')
    except subprocess.CalledProcessError:
        # Propagate the failure. (The original re-raised here, which made
        # the `return {}` that followed unreachable dead code; it and the
        # commented-out logger call have been removed.)
        raise
    info = {}
    if describe_out[-1].strip() == 'dirty':
        info['dirty'] = True
        describe_out.pop()
    info['commit_sha'] = describe_out.pop().lstrip('g')
    info['distance_to_latest_tag'] = int(describe_out.pop())
    info['current_version'] = describe_out.pop().lstrip('v')
    assert 0 == len(describe_out)
    return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False, colors=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
args += ['--progress']
if watch:
args += ['--watch']
if colors:
args += ['--colors']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def build_js_config_files():
    """Dump the node category map to a JSON file consumed by the JS build."""
    from website import settings
    print('Building JS config files...')
    # Open in text mode ('w', not 'wb'): json.dump writes str, which raises
    # TypeError on a binary handle under Python 3. Use `with` so the handle
    # is always closed and flushed.
    with open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'w') as fp:
        json.dump(settings.NODE_CATEGORY_MAP, fp)
    print('...Done.')
@task()
def assets(dev=False, watch=False, colors=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
build_js_config_files()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev, colors=colors)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print('Parsed {} styles'.format(total))
@task
def clean(verbose=False):
run('find . -name "*.pyc" -delete', echo=True)
@task(default=True)
def usage():
run('invoke --list')
### Maintenance Tasks ###
@task
def set_maintenance(start=None, end=None):
from website.maintenance import set_maintenance, get_maintenance
"""Set the time period for the maintenance notice to be displayed.
If no start or end values are displayed, default to starting now
and ending 24 hours from now. If no timezone info is passed along,
everything will be converted to UTC.
If a given end time results in a start that is after the end, start
will be changed to be 24 hours before the end time.
Examples:
invoke set_maintenance_state
invoke set_maintenance_state --start 2016-03-16T15:41:00-04:00
invoke set_maintenance_state --end 2016-03-16T15:41:00-04:00
"""
set_maintenance(start, end)
state = get_maintenance()
print('Maintenance notice up for {} to {}.'.format(state['start'], state['end']))
@task
def unset_maintenance():
    """Take down the maintenance notice."""
    from website.maintenance import unset_maintenance
    print('Taking down maintenance notice...')
    unset_maintenance()
    print('...Done.')
| |
from __future__ import annotations
from typing import Dict, Text, Any, Optional
import copy
import logging
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
from rasa.engine.graph import GraphComponent, ExecutionContext
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.storage.resource import Resource
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.core.domain import Domain
from rasa.shared.importers.importer import TrainingDataImporter
import rasa.shared.utils.io
from rasa.utils.tensorflow.constants import EPOCHS
from rasa.graph_components.providers.domain_for_core_training_provider import (
DomainForCoreTrainingProvider,
)
# Keys under which individual fingerprints are stored in the persisted
# fingerprint dictionary (see FinetuningValidator below).
FINGERPRINT_CONFIG = "fingerprint-config"
FINGERPRINT_CORE = "fingerprint-core"
FINGERPRINT_NLU = "fingerprint-nlu"
FINGERPRINT_VERSION = "rasa-version"
# Module-level logger; not referenced elsewhere in this visible chunk.
logger = logging.getLogger(__name__)
class FinetuningValidator(GraphComponent):
    """Component that checks whether fine-tuning is possible.

    This is a component at the beginning of the graph which receives all training data
    and raises an exception in case `is_finetuning` is `True` and finetuning is not
    possible (e.g. because new labels were added).

    In case we are doing a regular training (and not finetuning) this persists the
    necessary information extracted from the training data to be able to validate when
    initialized via load whether we can finetune.

    Finetuning is possible if, compared to the initial training phase, it holds that
    1. the configuration (except for "epoch" keys) does not change
    2. the domain (except for e.g. "responses") does not change - or we're not
       finetuning the core part
    3. the intents, entities, entity groups, entity roles, and action names that
       appeared in the original NLU training data, appear in the NLU training data
       used for finetuning, and no new such items (i.e. intents, entities, entity
       groups, entity roles, or action names) have been added, compared to the original
       training data - or we're not finetuning the nlu part.

    Note that even though conditions 2. and 3. differ based on which part we finetune,
    condition 1. always covers both parts, i.e. NLU and Core.
    """

    # File under which the fingerprint dictionary is persisted in model storage.
    FILENAME = "fingerprints-for-validation.json"

    @staticmethod
    def get_default_config() -> Dict[Text, Any]:
        """Default config for FinetuningValidator."""
        return {"validate_core": True, "validate_nlu": True}

    def __init__(
        self,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        fingerprints: Optional[Dict[Text, Text]] = None,
    ) -> None:
        """Instantiates a `FineTuningValidator`.

        Args:
            config: config for this component
            model_storage: Storage which graph components can use to persist and load
                themselves.
            resource: Resource locator for this component which can be used to persist
                and load itself from the `model_storage`.
            execution_context: Information about the current graph run.
            fingerprints: a dictionary of fingerprints generated by a
                `FineTuningValidator`
        """
        self._is_finetuning = execution_context.is_finetuning
        self._execution_context = execution_context
        self._model_storage = model_storage
        self._resource = resource
        # Empty when this is a fresh (non-finetuning) training run.
        self._fingerprints: Dict[Text, Text] = fingerprints or {}
        self._core = config["validate_core"]
        self._nlu = config["validate_nlu"]

    def validate(self, importer: TrainingDataImporter) -> TrainingDataImporter:
        """Validates whether we can finetune Core and NLU when finetuning is enabled.

        Args:
            importer: a training data importer

        Raises:
            `InvalidConfigException` if there is a conflict

        Returns:
            Training Data Importer.
        """
        self._validate(importer)
        return importer

    def _validate(self, importer: TrainingDataImporter) -> None:
        """Validate whether the finetuning setting conflicts with other settings.

        Note that this validation always takes into account the configuration of
        nlu *and* core part, while the validation of aspects of the domain and
        the NLU training data only happen if we request to validate finetuning
        with respect to NLU/Core models, respectively.

        For more details, see docstring of this class.

        Args:
            importer: a training data importer

        Raises:
            `InvalidConfigException` if there is a conflict
        """
        if self._is_finetuning and not self._fingerprints:
            raise InvalidConfigException(
                f"Finetuning is enabled but the {self.__class__.__name__} "
                f"does not remember seeing a training run. Ensure that you have "
                f"trained your model at least once (with finetuning disabled) "
                f"and ensure that the {self.__class__.__name__} is part of the "
                f"training graph. "
            )
        rasa_version = rasa.__version__
        if self._is_finetuning:
            old_rasa_version = self._fingerprints[FINGERPRINT_VERSION]
            if version.parse(old_rasa_version) < version.parse(
                MINIMUM_COMPATIBLE_VERSION
            ):
                raise InvalidConfigException(
                    f"The minimum compatible Rasa Version is "
                    f"{MINIMUM_COMPATIBLE_VERSION} but the model we attempt to "
                    f"finetune has been generated with an older version "
                    # Bug fix: closing parenthesis was missing in the message.
                    f"({old_rasa_version})."
                )
        self._fingerprints[FINGERPRINT_VERSION] = rasa_version

        # Condition 1 (see class docstring): the config — modulo epochs and
        # defaults — must be unchanged. Always checked for NLU *and* Core.
        fingerprint_config = self._get_fingerprint_of_schema_without_irrelevant_keys()
        self._compare_or_memorize(
            fingerprint_key=FINGERPRINT_CONFIG,
            new_fingerprint=fingerprint_config,
            error_message=(
                "Cannot finetune because more than just the 'epoch' keys have been "
                "changed in the configuration. "
                "Please revert your configuration and only change "
                "the 'epoch' settings where needed."
            ),
        )
        if self._core:
            # NOTE: If there's a consistency check between domain and core training data
            # that ensures domain and core training data are consistent, then we can
            # drop this check.
            fingerprint_core = self._get_fingerprint_of_domain_pruned_for_core(
                domain=importer.get_domain()
            )
            self._compare_or_memorize(
                fingerprint_key=FINGERPRINT_CORE,
                new_fingerprint=fingerprint_core,
                error_message=(
                    "Cannot finetune because keys that affect the training of core "
                    "components have changed."
                    "Please revert all settings in your domain file that affect the "
                    "training of core components."
                ),
            )
        if self._nlu:
            fingerprint_nlu = importer.get_nlu_data().label_fingerprint()
            self._compare_or_memorize(
                fingerprint_key=FINGERPRINT_NLU,
                new_fingerprint=fingerprint_nlu,
                error_message=(
                    "Cannot finetune because NLU training data contains new labels "
                    "or does not contain any examples for some known labels. "
                    "Please make sure that the NLU data that you use "
                    "for finetuning contains at least one example for every label "
                    "(i.e. intent, action name, ...) that was included in the NLU "
                    "data used for training the model which we attempt to finetune "
                    "now. Moreover, you must not add labels that were not included "
                    "during training before. "
                ),
            )
        self.persist()

    def _compare_or_memorize(
        self, fingerprint_key: Text, new_fingerprint: Text, error_message: Text
    ) -> None:
        """Compares given fingerprint if we are finetuning, otherwise just saves it.

        Args:
            fingerprint_key: name of the fingerprint
            new_fingerprint: a new fingerprint value
            error_message: message of `InvalidConfigException` that will be raised if
                a fingerprint is stored under `fingerprint_key` and differs from the
                `new_fingerprint` - and we're in finetuning mode (according to the
                execution context of this component)

        Raises:
            `InvalidConfigException` if an old fingerprint exists and differs from
            the new one
        """
        if self._is_finetuning:
            old_fingerprint = self._fingerprints[fingerprint_key]
            if old_fingerprint != new_fingerprint:
                raise InvalidConfigException(error_message)
        else:
            self._fingerprints[fingerprint_key] = new_fingerprint

    @staticmethod
    def _get_fingerprint_of_domain_pruned_for_core(domain: Domain) -> Text:
        """Returns a fingerprint of a pruned version of the domain relevant for core.

        Args:
            domain: a domain

        Returns:
            fingerprint
        """
        pruned_domain = DomainForCoreTrainingProvider.create_pruned_version(domain)
        return pruned_domain.fingerprint()

    def _get_fingerprint_of_schema_without_irrelevant_keys(self) -> Text:
        """Returns a fingerprint of the given schema with certain items removed.

        These items include specifications that do not influence actual training
        results such as "eager" mode. The only configuration (in your config) that is
        allowed to change is the number of `epochs`.

        Returns:
            fingerprint
        """
        graph_schema = self._execution_context.graph_schema
        schema_as_dict = graph_schema.as_dict()
        for node_name, node_dict in schema_as_dict["nodes"].items():
            config_copy = copy.deepcopy(node_dict["config"])
            config_copy.pop(EPOCHS, None)
            # ignore default values since they're filled in anyway later and can
            # end up in configs (or not) in mysterious ways
            defaults = graph_schema.nodes[node_name].uses.get_default_config()
            for key, default_value in defaults.items():
                if key in config_copy and config_copy[key] == default_value:
                    config_copy.pop(key)
            node_dict["config"] = config_copy
            node_dict.pop("eager")
            node_dict.pop("constructor_name")
        return rasa.shared.utils.io.deep_container_fingerprint(schema_as_dict)

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> FinetuningValidator:
        """Creates a new `FineTuningValidator` (see parent class for full docstring)."""
        return cls(
            config=config,
            model_storage=model_storage,
            resource=resource,
            execution_context=execution_context,
        )

    def persist(self) -> None:
        """Persists this `FineTuningValidator`."""
        with self._model_storage.write_to(self._resource) as path:
            rasa.shared.utils.io.dump_obj_as_json_to_file(
                filename=path / self.FILENAME, obj=self._fingerprints
            )

    @classmethod
    def load(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
        **kwargs: Any,
    ) -> GraphComponent:
        """Loads a `FineTuningValidator` (see parent class for full docstring)."""
        try:
            with model_storage.read_from(resource) as path:
                fingerprints = rasa.shared.utils.io.read_json_file(
                    filename=path / cls.FILENAME,
                )
                return cls(
                    config=config,
                    model_storage=model_storage,
                    execution_context=execution_context,
                    resource=resource,
                    fingerprints=fingerprints,
                )
        except ValueError as e:
            raise InvalidConfigException(
                f"Loading {cls.__name__} failed. Ensure that the {cls.__name__} "
                f"is part of your training graph and re-train your models before "
                f"attempting to use the {cls.__name__}."
            ) from e
| |
import sys
import string
import nltk
from nltk.collocations import *
import nltk.data
from nltk.tokenize import word_tokenize
from StringReplacementTools import RegexpReplacer
"""
TODO : move this stuff to texttools
This contains classes used for filtering words, sentences, ngrams, etc to be used in statistical analysis
"""
class Text:
    """
    This is the parent class for text normalization functions.
    Can be run on an individual record
    TODO Extend so can be ran on a record set [??]
    """
    def __init__(self, text):
        """
        Store the raw text and initialize all normalization flags to False.

        @param text: List of text records
        """
        self.text = text
        # Normalization flags; adjust via set_settings().
        self.lemmatize = False
        self.porter_stem = False
        self.remove_numerals = False
        self.remove_punctuation = False
        self.remove_stops = False
        # Bug fix: this flag was never initialized, so the contraction
        # replacement below always failed with AttributeError (and printed
        # the failure message) on every construction.
        self.replace_contractions = False
        # lowercase all
        #self.text = [w.lower() for w in text]
        # Replace contractions if requested. NOTE(review): set_settings() is
        # normally called *after* __init__, so this branch only runs when a
        # subclass sets the flag before delegating to Text.__init__.
        try:
            if self.replace_contractions is True:
                replacer = RegexpReplacer()
                self.text = [replacer.replace(w) for w in self.text]
        except Exception as e:
            print(('failed to replace contractions %s' % e))

    def set_settings(self, lemmatize=False, porter_stem=False, remove_numerals=True, remove_punctuation=True, remove_stops=True, replace_contractions=True):
        """
        Set the normalization flags.

        :param lemmatize: lemmatize words
        :param porter_stem: apply a Porter stemmer
        :param remove_numerals: drop numeric tokens
        :param remove_punctuation: drop punctuation tokens
        :param remove_stops: drop stopwords
        :param replace_contractions: expand contractions via RegexpReplacer
        """
        self.lemmatize = lemmatize
        self.porter_stem = porter_stem
        self.remove_numerals = remove_numerals
        self.remove_punctuation = remove_punctuation
        self.remove_stops = remove_stops
        self.replace_contractions = replace_contractions

    def displayText(self):
        """
        Returns the list of altered text
        """
        return self.text
class Sentences(Text):
    """Splits every text record into sentences with the NLTK punkt tokenizer."""
    def __init__(self, text, lemmatize=False, porter_stem=False, remove_numerals=True, remove_punctuation=True, remove_stops=True):
        Text.__init__(self, text)
        sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
        # Flatten per-record sentence lists into one list.
        self.sentences = [
            sentence
            for record in self.text
            for sentence in sentence_tokenizer.tokenize(record)
        ]
class Words(Text):
    def __init__(self, text, lemmatize=False, porter_stem=False, remove_numerals=True, remove_punctuation=True, remove_stops=True):
        """
        Parses out a list of words from the text.

        NOTE(review): currently a stub — the whole implementation below is
        commented out, so no attributes (including `self.words`) are set and
        Text.__init__ is never called. The flags in the signature are ignored.
        """
        pass
        # self.text = text
        # Text.__init__(self, text, settings)
        # self.extra_punctuation = ['.', ',', '--', '?', ')', '(', ':', '\'', '"', '""', '-', '}', '{', '://', '/"',
        #                           '\xc2\xb2', '...', '???', '..']
        # self.words = []
        # # try:
        # # try:
        # if self.remove_punctuation is True:
        #     pattern = r'\w+|[^\w\s]+'
        #     #self.words.extend(regexp_tokenize(self.text, pattern))
        #     self.words.extend(word_tokenize(self.text))
        #     self.words = [w for w in self.words if w not in string.punctuation]
        #     self.words = [w for w in self.words if w not in self.extra_punctuation]
        #     self.words = [string.lower(w) for w in self.words]
        #     #print('Punctuation removed')
        # else:
        #     self.words.extend(word_tokenize(self.text))
        #     self.words = [string.lower(w) for w in self.words]
        #     #print ('punctuation not removed')
        # # except:
        # pass
        # try:
        #     if self.remove_stops is True:
        #         from nltk.corpus import stopwords
        #         #english_stops = set(stopwords.words('english'))
        #         english_stops = stopwords.words('english')
        #         self.w_stop_words = self.words
        #         self.words = [word for word in self.words if word not in english_stops]
        #         #print('Removed stops')
        # except:
        #     #print('stop words not removed')
        #     pass
        # try:
        #     if self.lemmatize is True:
        #         from nltk.stem import WordNetLemmatizer
        #
        #         lemmatizer = WordNetLemmatizer()
        #         self.words = [lemmatizer.lemmatize(w) for w in self.words]
        #         #print('Lemmatized')
        # except Exception:
        #     #print('Not lemmatized')
        #     pass
        # try:
        #     if self.porter_stem is True:
        #         from nltk.stem import PorterStemmer
        #
        #         stemmer = PorterStemmer()
        #         self.words = [stemmer.stem(w) for w in self.words]
        #         self.stems = self.words
        #         #print('Porter stemmed ')
        # except:
        #     #print('Not porter stemmed')
        #     pass
        # try:
        #     if self.remove_numerals is True:
        #         # filters out all non alphabetical characters
        #         self.words = [word for word in self.words if word.isalpha() == True]
        #         #Filters any word composed only of digits
        #         #self.words = [word for word in self.words if word.isnumeric() == False]
        #
        #         #numbers = [str(0), str(1), str(2), str(3), str(4), str(5), str(6), str(7), str(8), str(9)]
        #         #Check whether the word is an integer (non-string) and filter
        #         #self.words = [word for word in self.words if isinstance(word, int) == False]
        #         #Filter out string digits
        #         #self.words = [word for word in self.words if word not in string.digits]
        #         #print ('Removed numerals')
        #         #realwords = []
        #         #for w in self.words:
        #         #num = False
        #         #for d in w:
        #         ###if d in string.digits:
        #         #num = True
        #         #exit
        #         #if num == False:
        #         #realwords.append(w)
        #         #self.words = realwords
        # except:
        #     #print('Numerals not removed')
        #     pass
        # except Exception, exc:
        # sys.exit("processing failed; %s" % str(exc))  # give a error message

    def tag_parts_of_speech(self):
        # POS-tag the tokenized words with NLTK's default tagger.
        # NOTE(review): `self.words` is never set while __init__ is stubbed,
        # so this raises AttributeError — confirm before relying on it.
        self.words_pos = nltk.pos_tag(self.words)
class Ngrams(Words):
    """
    Abstract parent class. Don't instantiate
    """
    def __init__(self, text, param):
        # param: settings dictionary shared by the n-gram subclasses.
        self.settings = param
        self.text = text
        # NOTE(review): Words.__init__ takes individual keyword flags, not a
        # settings dict — `param` lands in the `lemmatize` slot. Currently
        # harmless because Words.__init__ is a stub; confirm before fixing.
        Words.__init__(self, text, param)
        # Association measures used by the Bigrams/Trigrams subclasses.
        self.bigram_measures = nltk.collocations.BigramAssocMeasures()
        self.trigram_measures = nltk.collocations.TrigramAssocMeasures()
class Bigrams(Ngrams):
    """Finds the top bigram collocations (by PMI) in the tokenized words."""
    def __init__(self, text, param):
        Ngrams.__init__(self, text, param)
        collocation_finder = BigramCollocationFinder.from_words(self.words)
        # Ignore bigrams occurring fewer than 3 times.
        collocation_finder.apply_freq_filter(3)
        # Keep the 10 bigrams with the highest PMI, i.e. pairs that co-occur
        # more often than chance would predict.
        self.topPMI = collocation_finder.nbest(self.bigram_measures.pmi, 10)
        # self.topLR = finder.nbest(self.trigram_measures.likelihood_ratio, 10)
class Trigrams(Ngrams):
    """Finds the top trigram collocations (by PMI) in the tokenized words."""
    def __init__(self, text, param):
        Ngrams.__init__(self, text, param)
        collocation_finder = TrigramCollocationFinder.from_words(self.words)
        # Ignore trigrams occurring fewer than 3 times.
        collocation_finder.apply_freq_filter(3)
        # Keep the 10 trigrams with the highest PMI, i.e. triples that
        # co-occur more often than chance would predict.
        self.topPMI = collocation_finder.nbest(self.trigram_measures.pmi, 10)
        # self.topLR = finder.nbest(self.trigram_measures.likelihood_ratio, 10)
#
#class Lemmatized(Words):
# def __init__(self, text, param):
# Words.__init__(self, text, param)
# from nltk.stem import WordNetLemmatizer
# lemmatizer = WordNetLemmatizer()
# self.lemmas = [lemmatizer.lemmatize(w) for w in self.words]
#
#class PorterStemmed(Words):
# """
# Implements a Porter Stemmer
# """
# def __init__(self, text, param):
# Words.__init__(self, text, param)
# from nltk.stem import PorterStemmer
# stemmer = PorterStemmer()
# self.stems = [stemmer.stem(w) for w in self.words]
# # Group bigrams by first word in bigram.
# prefix_keys = collections.defaultdict(list)
# for key, scores in scored:
# prefix_keys[key[0]].append((key[1], scores))
#
## Sort keyed bigrams by strongest association.
#for key in prefix_keys:
# prefix_keys[key].sort(key = lambda x: -x[1])
#class RegexpReplacer(object):
# def __init__(self, patterns=replacement_patterns):
# self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]
# def replace(self, text):
# s = text
# for (pattern, repl) in self.patterns:
# (s, count) = re.subn(pattern, repl, s)
# return s
#class NGramFinder(Text):
# def __init__(self, text, param):
# from nltk.collocations import BigramCollocationFinder
# from nltk.metrics import BigramAssocMeasures
# bcf = BigramCollocationFinder.from_words(words go here)
# bcf.nbest(BigramAssocMeasures.likelihood_ratio, self.numGram)
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading Google Code issues to an issue service.
"""
import collections
import datetime
import json
import re
import sys
class IdentityDict(dict):
    """Dict whose lookups fall back to the key itself when it is missing."""
    def __missing__(self, key):
        # Unknown keys pass through unchanged (identity mapping).
        return key
def TryFormatDate(date):
    """Attempt to clean up a timestamp date."""
    try:
        # Drop a stray trailing colon before parsing.
        cleaned = date[:-1] if date.endswith(":") else date
        parsed = datetime.datetime.strptime(cleaned, "%Y-%m-%dT%H:%M:%S.%fZ")
        return str(parsed)
    except ValueError:
        # Not in the expected format; hand the original string back.
        return date
def WrapText(text, max):
    """Inserts a newline if any line of a file is > max chars.

    Note that the newline is inserted at the first whitespace
    character, so there may be lines longer than max.
    """
    chars = list(text)
    last_break = 0
    for index, ch in enumerate(chars):
        if ch == '\n':
            last_break = index
        elif index - last_break > max and ch == ' ':
            # Convert this space into a line break.
            chars[index] = '\n'
            last_break = index
    return ''.join(chars)
# Exception hierarchy: all tool-specific errors derive from Error so callers
# can catch the whole family with one except clause.
class Error(Exception):
    """Base error class."""
class InvalidUserError(Error):
    """Error for an invalid user."""
class ProjectNotFoundError(Error):
    """Error for a non-existent project."""
class ServiceError(Error):
    """Error when communicating with the issue or user service."""
class UserService(object):
    """Abstract user operations.

    Handles user operations on an user API. Concrete services must
    override every method; the base class only defines the interface.
    """

    def IsUser(self, username):
        """Checks if the user exists.

        Args:
          username: The username to check.

        Returns:
          True if the username exists.
        """
        raise NotImplementedError()
class GoogleCodeIssue(object):
    """Google Code issue.

    Handles parsing and viewing a Google Code issue.
    """

    def __init__(self, issue, project_name, user_map):
        """Initialize the GoogleCodeIssue.

        Args:
          issue: The Google Code Issue as a dictionary.
          project_name: The name of the project the issue belongs to.
          user_map: A map from Google Code usernames to issue service names.
        """
        self._issue = issue
        self._project_name = project_name
        self._user_map = user_map

    def GetProjectName(self):
        """Returns the project name."""
        return self._project_name

    def GetUserMap(self):
        """Returns the user map."""
        return self._user_map

    def GetOwner(self):
        """Get the owner username of a Google Code issue.

        This will ALWAYS be the person requesting the issue export.
        """
        return self._user_map["user_requesting_export"]

    def GetContentUpdatedOn(self):
        """Returns the timestamp when the issue content was last updated."""
        return self._issue["updated"]

    def GetCreatedOn(self):
        """Returns the timestamp when the issue was created."""
        return self._issue["published"]

    def GetId(self):
        """Returns the issue id."""
        return self._issue["id"]

    def GetLabels(self):
        """Returns the list of labels of this issue (possibly empty)."""
        return self._issue.get("labels", [])

    def GetKind(self):
        """Returns the issue kind; defaults to 'Defect' if no Type- label."""
        for label in self.GetLabels():
            if "Type-" in label:
                return label[len("Type-"):]
        return "Defect"

    def GetPriority(self):
        """Returns the issue priority; defaults to 'Medium' if no label."""
        for label in self.GetLabels():
            if "Priority-" in label:
                return label[len("Priority-"):]
        return "Medium"

    def GetAuthor(self):
        """Returns the mapped author username, or None if no author field."""
        if "author" not in self._issue:
            return None
        return self._user_map[self._issue["author"]["name"]]

    def GetStatus(self):
        """Returns the lowercased status; 'accepted' is normalized to 'open'."""
        status = self._issue["status"].lower()
        return "open" if status == "accepted" else status

    def GetTitle(self):
        """Returns the issue title."""
        return self._issue["title"]

    def GetUpdatedOn(self):
        """Returns the timestamp when the issue was last updated."""
        return self.GetCreatedOn()

    def _GetDescription(self):
        """Returns the first comment, which holds the raw issue description."""
        return self._issue["comments"]["items"][0]

    def GetComments(self):
        """Returns every comment on the issue except the description."""
        return self._issue["comments"]["items"][1:]

    def IsOpen(self):
        """Returns True if the issue is marked as open."""
        return self._issue.get("state") == "open"

    def GetDescription(self):
        """Returns the Description of the issue."""
        # The description is the first comment, rendered the same way.
        return GoogleCodeComment(self, self._GetDescription()).GetDescription()
class GoogleCodeComment(object):
    """Google Code Comment.

    Handles parsing and viewing a Google Code Comment.
    """

    def __init__(self, googlecode_issue, comment):
        """Initialize the GoogleCodeComment.

        Args:
          googlecode_issue: A GoogleCodeIssue instance.
          comment: The Google Code Comment as dictionary.
        """
        self._comment = comment
        self._googlecode_issue = googlecode_issue

    def GetContent(self):
        """Returns the comment text."""
        return self._comment["content"]

    def GetCreatedOn(self):
        """Returns the timestamp when the comment was created."""
        return self._comment["published"]

    def GetId(self):
        """Returns the comment id."""
        return self._comment["id"]

    def GetLabels(self):
        """Returns the labels modified with the comment (possibly empty)."""
        if "updates" in self._comment:
            if "labels" in self._comment["updates"]:
                return self._comment["updates"]["labels"]
        return []

    def GetIssue(self):
        """Returns the GoogleCodeIssue this comment belongs to."""
        return self._googlecode_issue

    def GetUpdatedOn(self):
        """Returns the timestamp when the comment was last updated."""
        return self.GetCreatedOn()

    def GetAuthor(self):
        """Returns the mapped author username, or None if no author field."""
        if "author" not in self._comment:
            return None
        author = self._comment["author"]["name"]
        return self.GetIssue().GetUserMap()[author]

    def GetDescription(self):
        """Returns the Description of the comment.

        Renders the comment text as a fenced code block plus a footer with
        author, date, label changes, and attachment links.
        """
        author = self.GetAuthor()
        comment_date = self.GetCreatedOn()
        comment_text = self.GetContent()
        if not comment_text:
            comment_text = "(No text was entered with this change)"
        # Remove <b> tags, which Codesite automatically includes if issue body
        # is based on a prompt.
        # TODO(chrsmith): Unescape HTML, e.g. &gt; and &aacute;.
        comment_text = comment_text.replace("<b>", "")
        comment_text = comment_text.replace("</b>", "")
        comment_text = WrapText(comment_text, 82)  # In case it was already wrapped...
        body = "```\n" + comment_text + "\n```"
        footer = "\n\nOriginal issue reported on code.google.com by `%s` on %s" % (
            author, TryFormatDate(comment_date))
        # Add label adjustments.
        if self.GetLabels():
            labels_added = []
            labels_removed = []
            for label in self.GetLabels():
                if label.startswith("-"):
                    labels_removed.append(label[1:])
                else:
                    labels_added.append(label)
            footer += "\n"
            if labels_added:
                footer += "- **Labels added**: %s\n" % (", ".join(labels_added))
            if labels_removed:
                footer += "- **Labels removed**: %s\n" % (", ".join(labels_removed))
        # Add references to attachments as appropriate.
        attachment_lines = []
        for attachment in self._comment.get("attachments", []):
            if "isDeleted" in attachment:
                # Deleted attachments won't be found on the issue mirror.
                continue
            link = "https://storage.googleapis.com/google-code-attachments/%s/issue-%d/comment-%d/%s" % (
                self.GetIssue().GetProjectName(), self.GetIssue().GetId(),
                self.GetId(), attachment["fileName"])

            def has_extension(extension):
                return attachment["fileName"].lower().endswith(extension)

            # Bug fix: the original tested ".png" on every iteration, so only
            # PNG files were ever treated as images.
            is_image_attachment = False
            for extension in [".png", ".jpg", ".jpeg", ".bmp", ".tif", ".gif"]:
                is_image_attachment |= has_extension(extension)

            if is_image_attachment:
                # Bug fix: the original format string had a single %s but three
                # arguments, which raised TypeError. Render an inline image
                # preview followed by the link.
                line = " * *Attachment: %s<br>![%s](%s)*" % (
                    attachment["fileName"], attachment["fileName"], link)
            else:
                line = " * *Attachment: [%s](%s)*" % (attachment["fileName"], link)
            attachment_lines.append(line)
        if len(attachment_lines) > 0:
            footer += "\n<hr>\n" + "\n".join(attachment_lines)
        # Return the data to send to generate the comment.
        return body + footer
class IssueService(object):
    """Abstract issue operations.

    Handles creating and updating issues and comments on an user API.
    Concrete services must override every method; the base class only
    defines the interface.
    """

    def GetIssues(self, state="open"):
        """Gets all of the issue for the repository.

        Args:
          state: The state of the repository can be either 'open' or 'closed'.

        Returns:
          The list of all of the issues for the given repository.

        Raises:
          IOError: An error occurred accessing previously created issues.
        """
        raise NotImplementedError()

    def CreateIssue(self, googlecode_issue):
        """Creates an issue.

        Args:
          googlecode_issue: An instance of GoogleCodeIssue

        Returns:
          The issue number of the new issue.

        Raises:
          ServiceError: An error occurred creating the issue.
        """
        raise NotImplementedError()

    def CloseIssue(self, issue_number):
        """Closes an issue.

        Args:
          issue_number: The issue number.
        """
        raise NotImplementedError()

    def CreateComment(self, issue_number, source_issue_id,
                      googlecode_comment, project_name):
        """Creates a comment on an issue.

        Args:
          issue_number: The issue number.
          source_issue_id: The Google Code issue id.
          googlecode_comment: An instance of GoogleCodeComment
          project_name: The Google Code project name.
        """
        raise NotImplementedError()
def LoadIssueData(issue_file_path, project_name):
    """Loads issue data from a file.

    Args:
      issue_file_path: path to the file to load
      project_name: name of the project to load

    Returns:
      Issue data as a list of dictionaries.

    Raises:
      ProjectNotFoundError: the project_name was not found in the file.
    """
    with open(issue_file_path) as issue_file:
        exported_data = json.load(issue_file)
    # Scan the exported projects for the one we were asked to load.
    for project in exported_data["projects"]:
        if project["name"] == project_name:
            return project["issues"]["items"]
    raise ProjectNotFoundError("Project %s not found" % project_name)
def LoadUserData(user_file_path, user_service):
    """Loads user data from a file. If not present, the user name will
    just return whatever is passed to it.

    Args:
      user_file_path: path to the file to load
      user_service: an instance of UserService

    Returns:
      An IdentityDict mapping Google Code usernames to service usernames;
      usernames without an explicit mapping map to themselves.

    Raises:
      InvalidUserError: a mapped username does not exist on the service.
    """
    identity_dict = IdentityDict()
    if not user_file_path:
        return identity_dict
    with open(user_file_path) as user_data:
        user_json = user_data.read()
    user_map = json.loads(user_json)["users"]
    # Validate every target username against the user service before use.
    for username in user_map.values():
        if not user_service.IsUser(username):
            raise InvalidUserError("%s is not a User" % username)
    # Bug fix: the original updated and returned an undefined name `result`,
    # raising NameError whenever a user file was supplied. Merge the explicit
    # mappings into the identity dict and return it.
    identity_dict.update(user_map)
    return identity_dict
class IssueExporter(object):
"""Issue Migration.
Handles the uploading issues from Google Code to an issue service.
"""
    def __init__(self, issue_service, user_service, issue_json_data,
                 project_name, user_map):
        """Initialize the IssueExporter.

        Args:
          issue_service: An instance of IssueService.
          user_service: An instance of UserService.
          issue_json_data: A data object of issues from Google Code.
          project_name: The name of the project to export to.
          user_map: A map from user email addresses to service usernames.
        """
        self._issue_service = issue_service
        self._user_service = user_service
        self._issue_json_data = issue_json_data
        self._project_name = project_name
        self._user_map = user_map
        # Mapping from issue ID to the issue's metadata. This is used to verify
        # consistency with any previous attempts at exporting issues.
        self._previously_created_issues = {}
        # Progress counters consumed by _UpdateProgressBar.
        self._issue_total = 0
        self._issue_number = 0
        self._comment_number = 0
        self._comment_total = 0
        self._skipped_issues = 0
    def Init(self):
        """Initialize the needed variables."""
        # Populate _previously_created_issues so that issues already exported
        # in an earlier run can be skipped.
        self._GetAllPreviousIssues()
    def _GetAllPreviousIssues(self):
        """Gets all previously uploaded issues.

        Fills self._previously_created_issues keyed by the service-side issue
        number, recording title and comment count for each.
        """
        print "Getting any previously added issues..."
        open_issues = self._issue_service.GetIssues("open")
        closed_issues = self._issue_service.GetIssues("closed")
        issues = open_issues + closed_issues
        for issue in issues:
            # Yes, GitHub's issues API has both ID and Number, and they are
            # the opposite of what you think they are.
            # NOTE(review): `die` is not defined anywhere in this chunk —
            # confirm it exists elsewhere, otherwise a duplicate number raises
            # NameError here rather than the intended message.
            issue["number"] not in self._previously_created_issues or die(
                "GitHub returned multiple issues with the same ID?")
            self._previously_created_issues[issue["number"]] = {
                "title": issue["title"],
                "comment_count": issue["comments"],
            }
def _UpdateProgressBar(self):
"""Update issue count 'feed'.
This displays the current status of the script to the user.
"""
feed_string = ("\rIssue: %d/%d -> Comment: %d/%d " %
(self._issue_number, self._issue_total,
self._comment_number, self._comment_total))
sys.stdout.write(feed_string)
sys.stdout.flush()
    def _CreateIssue(self, googlecode_issue):
        """Converts an issue from Google Code to an issue service.

        This will take the Google Code issue and create a corresponding issue on
        the issue service. If the issue on Google Code was closed it will also
        be closed on the issue service.

        Args:
          googlecode_issue: An instance of GoogleCodeIssue

        Returns:
          The issue number assigned by the service.
        """
        # Thin delegation; closing (when applicable) is handled by the caller.
        return self._issue_service.CreateIssue(googlecode_issue)
def _CreateComments(self, comments, issue_number, googlecode_issue):
  """Converts a list of issue comments from Google Code to an issue service.

  This will take a list of Google Code issue comments and create
  corresponding comments on an issue service for the given issue number.

  Args:
    comments: A list of comments from the Google Code issue.
    issue_number: The issue number on the issue service.
    googlecode_issue: The GoogleCodeIssue instance the comments belong to.
  """
  self._comment_total = len(comments)
  self._comment_number = 0
  for comment in comments:
    googlecode_comment = GoogleCodeComment(googlecode_issue, comment)
    self._comment_number += 1
    self._UpdateProgressBar()
    self._issue_service.CreateComment(issue_number,
                                      googlecode_issue.GetId(),
                                      googlecode_comment,
                                      self._project_name)
def Start(self):
  """The primary function that runs this script.

  This will traverse the issues and attempt to create each issue and its
  comments on the issue service, skipping any issue that was already
  uploaded by a previous run.
  """
  print "Starting issue export for '%s'" % (self._project_name)
  # If there are existing issues, then confirm they exactly match the Google
  # Code issues. Otherwise issue IDs will not match and/or there may be
  # missing data.
  self._AssertInGoodState()
  self._issue_total = len(self._issue_json_data)
  self._issue_number = 0
  self._skipped_issues = 0
  for issue in self._issue_json_data:
    googlecode_issue = GoogleCodeIssue(
        issue, self._project_name, self._user_map)
    self._issue_number += 1
    self._UpdateProgressBar()
    # Issues already present on the service (from an earlier run) are
    # skipped rather than recreated.
    issue_id = googlecode_issue.GetId()
    if issue_id in self._previously_created_issues:
      self._skipped_issues += 1
      continue
    issue_number = self._CreateIssue(googlecode_issue)
    # A negative number signals that creation failed; move to the next issue.
    if issue_number < 0:
      continue
    # The service-assigned number must track the Google Code ID exactly.
    if int(issue_number) != int(googlecode_issue.GetId()):
      raise RuntimeError("Google Code and GitHub issue nos mismatch at #%s (got %s)" % (
          googlecode_issue.GetId(), issue_number))
    comments = googlecode_issue.GetComments()
    self._CreateComments(comments, issue_number, googlecode_issue)
    if not googlecode_issue.IsOpen():
      self._issue_service.CloseIssue(issue_number)
  # TODO(chrsmith): Issue a warning if/when the issue ID get out of sync.
  if self._skipped_issues > 0:
    print ("\nSkipped %d/%d issue previously uploaded." %
           (self._skipped_issues, self._issue_total))
def _AssertInGoodState(self):
"""Checks if the last issue exported is sound, otherwise raises an error.
Checks the existing issues that have been exported and confirms that it
matches the issue on Google Code. (Both Title and ID match.) It then
confirms that it has all of the expected comments, adding any missing ones
as necessary.
"""
if len(self._previously_created_issues) == 0:
return
print ("Existing issues detected for the repo. Likely due to a previous\n"
" run being aborted or killed. Checking consistency...")
# Get the last exported issue, and its dual on Google Code.
last_gh_issue_id = -1
for id in self._previously_created_issues:
if id > last_gh_issue_id:
last_gh_issue_id = id
last_gh_issue = self._previously_created_issues[last_gh_issue_id]
last_gc_issue = None
for issue in self._issue_json_data:
if int(issue["id"]) == int(last_gh_issue_id) and (
issue["title"] == last_gh_issue["title"]):
last_gc_issue = GoogleCodeIssue(issue,
self._project_name,
self._user_map)
break
if last_gc_issue is None:
raise RuntimeError(
"Unable to find Google Code issue #%s '%s'.\n"
" Were issues added to GitHub since last export attempt?" % (
last_gh_issue_id, last_gh_issue["title"]))
print "Last issue (#%s) matches. Checking comments..." % (last_gh_issue_id)
# Check comments. Add any missing ones as needed.
num_gc_issue_comments = len(last_gc_issue.GetComments())
if last_gh_issue["comment_count"] != num_gc_issue_comments:
print "GitHub issue has fewer comments than Google Code's. Fixng..."
for idx in range(last_gh_issue["comment_count"], num_gc_issue_comments):
comment = last_gc_issue.GetComments()[idx]
googlecode_comment = GoogleCodeComment(last_gc_issue, comment)
# issue_number == source_issue_id
self._issue_service.CreateComment(
int(last_gc_issue.GetId()), int(last_gc_issue.GetId()),
googlecode_comment, self._project_name)
print " Added comment #%s." % (idx + 1)
print "Done! Issue tracker now in expected state. Ready for more exports."
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class BatchMatrixDiagTest(tf.test.TestCase):
  """Tests for tf.batch_matrix_diag: expand a (batched) vector into a
  (batched) diagonal matrix.
  """

  # Toggled to True by the GPU subclass so the same cases run on both devices.
  _use_gpu = False

  def testVector(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      v_diag = tf.batch_matrix_diag(v)
      self.assertEqual((3, 3), v_diag.get_shape())
      self.assertAllEqual(v_diag.eval(), mat)

  def testBatchVector(self):
    with self.test_session(use_gpu=self._use_gpu):
      v_batch = np.array([[1.0, 2.0, 3.0],
                          [4.0, 5.0, 6.0]])
      mat_batch = np.array(
          [[[1.0, 0.0, 0.0],
            [0.0, 2.0, 0.0],
            [0.0, 0.0, 3.0]],
           [[4.0, 0.0, 0.0],
            [0.0, 5.0, 0.0],
            [0.0, 0.0, 6.0]]])
      v_batch_diag = tf.batch_matrix_diag(v_batch)
      self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
      self.assertAllEqual(v_batch_diag.eval(), mat_batch)

  def testInvalidShape(self):
    # A rank-0 input is rejected at graph-construction time.
    with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
      tf.batch_matrix_diag(0)

  def testInvalidShapeAtEval(self):
    # With an unknown static shape, the rank check only fires at run time.
    with self.test_session(use_gpu=self._use_gpu):
      v = tf.placeholder(dtype=tf.float32)
      with self.assertRaisesOpError("input must be at least 1-dim"):
        tf.batch_matrix_diag(v).eval(feed_dict={v: 0.0})

  def testGrad(self):
    # Numeric-vs-analytic gradient check for single and batched vectors.
    shapes = ((3,), (7, 4))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), np.float32)
        y = tf.batch_matrix_diag(x)
        error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                               y, y.get_shape().as_list())
        self.assertLess(error, 1e-4)
class BatchMatrixDiagGpuTest(BatchMatrixDiagTest):
  # Re-runs every BatchMatrixDiagTest case with GPU placement enabled.
  _use_gpu = True
class BatchMatrixSetDiagTest(tf.test.TestCase):
  """Tests for tf.batch_matrix_set_diag: replace the main diagonal of a
  (batched) matrix with a given (batched) vector.
  """

  # Toggled to True by the GPU subclass so the same cases run on both devices.
  _use_gpu = False

  def testVector(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.array([[0.0, 1.0, 0.0],
                      [1.0, 0.0, 1.0],
                      [1.0, 1.0, 1.0]])
      mat_set_diag = np.array([[1.0, 1.0, 0.0],
                               [1.0, 2.0, 1.0],
                               [1.0, 1.0, 3.0]])
      output = tf.batch_matrix_set_diag(mat, v)
      self.assertEqual((3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag, output.eval())

  def testBatchVector(self):
    with self.test_session(use_gpu=self._use_gpu):
      v_batch = np.array([[-1.0, -2.0, -3.0],
                          [-4.0, -5.0, -6.0]])
      mat_batch = np.array(
          [[[1.0, 0.0, 3.0],
            [0.0, 2.0, 0.0],
            [1.0, 0.0, 3.0]],
           [[4.0, 0.0, 4.0],
            [0.0, 5.0, 0.0],
            [2.0, 0.0, 6.0]]])
      mat_set_diag_batch = np.array(
          [[[-1.0, 0.0, 3.0],
            [0.0, -2.0, 0.0],
            [1.0, 0.0, -3.0]],
           [[-4.0, 0.0, 4.0],
            [0.0, -5.0, 0.0],
            [2.0, 0.0, -6.0]]])
      output = tf.batch_matrix_set_diag(mat_batch, v_batch)
      self.assertEqual((2, 3, 3), output.get_shape())
      self.assertAllEqual(mat_set_diag_batch, output.eval())

  def testInvalidShape(self):
    # Static checks: matrix must be rank >= 2, diagonal rank >= 1.
    with self.assertRaisesRegexp(ValueError, "must have rank at least 2"):
      tf.batch_matrix_set_diag(0, [0])
    with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
      tf.batch_matrix_set_diag([[0]], 0)

  def testInvalidShapeAtEval(self):
    # With unknown static shapes, the same checks only fire at run time.
    with self.test_session(use_gpu=self._use_gpu):
      v = tf.placeholder(dtype=tf.float32)
      with self.assertRaisesOpError("input must be at least 2-dim"):
        tf.batch_matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
      with self.assertRaisesOpError(
          r"but received input shape: \[1,1\] and diagonal shape: \[\]"):
        tf.batch_matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})

  def testGrad(self):
    # Numeric-vs-analytic gradient check with respect to both inputs.
    shapes = ((3, 4, 4), (7, 4, 8, 8))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), dtype=tf.float32)
        x_diag = tf.constant(np.random.rand(*shape[:-1]), dtype=tf.float32)
        y = tf.batch_matrix_set_diag(x, x_diag)
        error_x = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                                 y, y.get_shape().as_list())
        self.assertLess(error_x, 1e-4)
        error_x_diag = tf.test.compute_gradient_error(
            x_diag, x_diag.get_shape().as_list(),
            y, y.get_shape().as_list())
        self.assertLess(error_x_diag, 1e-4)

  def testGradWithNoShapeInformation(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
      v = tf.placeholder(dtype=tf.float32)
      mat = tf.placeholder(dtype=tf.float32)
      grad_input = tf.placeholder(dtype=tf.float32)
      output = tf.batch_matrix_set_diag(mat, v)
      grads = tf.gradients(output, [mat, v], grad_ys=grad_input)
      grad_input_val = np.random.rand(3, 3).astype(np.float32)
      grad_vals = sess.run(
          grads, feed_dict={v: 2 * np.ones(3), mat: np.ones((3, 3)),
                            grad_input: grad_input_val})
      # d(output)/d(v) is the diagonal of the upstream gradient;
      # d(output)/d(mat) is the upstream gradient with its diagonal zeroed.
      self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
      self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
                          grad_vals[0])
class BatchMatrixSetDiagGpuTest(BatchMatrixSetDiagTest):
  # Re-runs every BatchMatrixSetDiagTest case with GPU placement enabled.
  _use_gpu = True
class BatchMatrixDiagPartTest(tf.test.TestCase):
  """Tests for tf.batch_matrix_diag_part: extract the main diagonal of a
  (batched) square matrix.
  """

  # Toggled to True by the GPU subclass so the same cases run on both devices.
  _use_gpu = False

  def testMatrix(self):
    with self.test_session(use_gpu=self._use_gpu):
      v = np.array([1.0, 2.0, 3.0])
      mat = np.diag(v)
      mat_diag = tf.batch_matrix_diag_part(mat)
      self.assertEqual((3,), mat_diag.get_shape())
      self.assertAllEqual(mat_diag.eval(), v)

  def testBatchMatrix(self):
    with self.test_session(use_gpu=self._use_gpu):
      v_batch = np.array([[1.0, 2.0, 3.0],
                          [4.0, 5.0, 6.0]])
      mat_batch = np.array(
          [[[1.0, 0.0, 0.0],
            [0.0, 2.0, 0.0],
            [0.0, 0.0, 3.0]],
           [[4.0, 0.0, 0.0],
            [0.0, 5.0, 0.0],
            [0.0, 0.0, 6.0]]])
      self.assertEqual(mat_batch.shape, (2, 3, 3))
      mat_batch_diag = tf.batch_matrix_diag_part(mat_batch)
      self.assertEqual((2, 3), mat_batch_diag.get_shape())
      self.assertAllEqual(mat_batch_diag.eval(), v_batch)

  def testInvalidShape(self):
    # Static checks: rank >= 2 and the last two dimensions must be equal.
    with self.assertRaisesRegexp(ValueError, "must have rank at least 2"):
      tf.batch_matrix_diag_part(0)
    with self.assertRaisesRegexp(ValueError, r"Dimensions .* not compatible"):
      tf.batch_matrix_diag_part([[0, 1], [1, 0], [0, 0]])

  def testInvalidShapeAtEval(self):
    # With unknown static shapes, the same checks only fire at run time.
    with self.test_session(use_gpu=self._use_gpu):
      v = tf.placeholder(dtype=tf.float32)
      with self.assertRaisesOpError("input must be at least 2-dim"):
        tf.batch_matrix_diag_part(v).eval(feed_dict={v: 0.0})
      with self.assertRaisesOpError("last two dimensions must be equal"):
        tf.batch_matrix_diag_part(v).eval(
            feed_dict={v: [[0, 1], [1, 0], [0, 0]]})

  def testGrad(self):
    # Numeric-vs-analytic gradient check.
    shapes = ((3, 3), (5, 3, 3))
    with self.test_session(use_gpu=self._use_gpu):
      for shape in shapes:
        x = tf.constant(np.random.rand(*shape), dtype=np.float32)
        y = tf.batch_matrix_diag_part(x)
        error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                               y, y.get_shape().as_list())
        self.assertLess(error, 1e-4)
class BatchMatrixDiagPartGpuTest(BatchMatrixDiagPartTest):
  # Re-runs every BatchMatrixDiagPartTest case with GPU placement enabled.
  _use_gpu = True
class DiagTest(tf.test.TestCase):
  """Round-trip tests for tf.diag / tf.diag_part over ranks 1-3 and the
  supported integer, float and complex dtypes.
  """

  def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
    """Checks tf.diag(diag) == expected_ans and that tf.diag_part
    recovers `diag` from `expected_ans`.
    """
    with self.test_session(use_gpu=use_gpu):
      tf_ans = tf.diag(tf.convert_to_tensor(diag.astype(dtype)))
      out = tf_ans.eval()
      tf_ans_inv = tf.diag_part(expected_ans)
      inv_out = tf_ans_inv.eval()
    self.assertAllClose(out, expected_ans)
    self.assertAllClose(inv_out, diag)
    self.assertShapeEqual(expected_ans, tf_ans)
    self.assertShapeEqual(diag, tf_ans_inv)

  def testEmptyTensor(self):
    # The diagonal expansion of an empty vector is a 0x0 matrix.
    x = np.array([])
    expected_ans = np.empty([0, 0])
    self.diagOp(x, np.int32, expected_ans)

  def testRankOneIntTensor(self):
    x = np.array([1, 2, 3])
    expected_ans = np.array(
        [[1, 0, 0],
         [0, 2, 0],
         [0, 0, 3]])
    self.diagOp(x, np.int32, expected_ans)
    self.diagOp(x, np.int64, expected_ans)

  def testRankOneFloatTensor(self):
    x = np.array([1.1, 2.2, 3.3])
    expected_ans = np.array(
        [[1.1, 0, 0],
         [0, 2.2, 0],
         [0, 0, 3.3]])
    self.diagOp(x, np.float32, expected_ans)
    self.diagOp(x, np.float64, expected_ans)

  def testRankOneComplexTensor(self):
    x = np.array([1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], dtype = np.complex64)
    expected_ans = np.array(
        [[1.1 + 1.1j, 0 + 0j, 0 + 0j],
         [0 + 0j, 2.2 + 2.2j, 0 + 0j],
         [0 + 0j, 0 + 0j, 3.3 + 3.3j]], dtype = np.complex64)
    self.diagOp(x, np.complex64, expected_ans)

  def testRankTwoIntTensor(self):
    # A rank-k input produces a rank-2k output indexed by the input
    # indices repeated twice.
    x = np.array([[1, 2, 3], [4, 5, 6]])
    expected_ans = np.array(
        [[[[1, 0, 0], [0, 0, 0]],
          [[0, 2, 0], [0, 0, 0]],
          [[0, 0, 3], [0, 0, 0]]],
         [[[0, 0, 0], [4, 0, 0]],
          [[0, 0, 0], [0, 5, 0]],
          [[0, 0, 0], [0, 0, 6]]]])
    self.diagOp(x, np.int32, expected_ans)
    self.diagOp(x, np.int64, expected_ans)

  def testRankTwoFloatTensor(self):
    x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])
    expected_ans = np.array(
        [[[[1.1, 0, 0], [0, 0, 0]],
          [[0, 2.2, 0], [0, 0, 0]],
          [[0, 0, 3.3], [0, 0, 0]]],
         [[[0, 0, 0], [4.4, 0, 0]],
          [[0, 0, 0], [0, 5.5, 0]],
          [[0, 0, 0], [0, 0, 6.6]]]])
    self.diagOp(x, np.float32, expected_ans)
    self.diagOp(x, np.float64, expected_ans)

  def testRankTwoComplexTensor(self):
    x = np.array([[1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j],
                  [4.4 + 4.4j, 5.5 + 5.5j, 6.6 + 6.6j]], dtype = np.complex64)
    expected_ans = np.array(
        [[[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]],
          [[0 + 0j, 2.2 + 2.2j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]],
          [[0 + 0j, 0 + 0j, 3.3 + 3.3j], [0 + 0j, 0 + 0j, 0 + 0j]]],
         [[[0 + 0j, 0 + 0j, 0 + 0j], [4.4 + 4.4j, 0 + 0j, 0 + 0j]],
          [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 5.5 + 5.5j, 0 + 0j]],
          [[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 6.6 + 6.6j]]]],
        dtype = np.complex64)
    self.diagOp(x, np.complex64, expected_ans)

  def testRankThreeFloatTensor(self):
    x = np.array([[[1.1, 2.2], [3.3, 4.4]],
                  [[5.5, 6.6], [7.7, 8.8]]])
    expected_ans = np.array(
        [[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],
           [[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],
          [[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],
           [[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],
         [[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],
           [[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],
          [[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],
           [[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])
    self.diagOp(x, np.float32, expected_ans)
    self.diagOp(x, np.float64, expected_ans)

  def testRankThreeComplexTensor(self):
    x = np.array([[[1.1 + 1.1j, 2.2 + 2.2j], [3.3 + 3.3j, 4.4 + 4.4j]],
                  [[5.5 + 5.5j, 6.6 + 6.6j], [7.7 + 7.7j, 8.8 + 8.8j]]],
                 dtype = np.complex64)
    expected_ans = np.array(
        [[[[[[1.1 + 1.1j, 0 + 0j], [0 + 0j, 0 + 0j]],
            [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]],
           [[[0 + 0j, 2.2 + 2.2j], [0 + 0j, 0 + 0j]],
            [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]]],
          [[[[0 + 0j, 0 + 0j], [3.3 + 3.3j, 0 + 0j]],
            [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]],
           [[[0 + 0j, 0 + 0j], [0 + 0j, 4.4 + 4.4j]],
            [[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]]]],
         [[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
            [[5.5 + 5.5j, 0 + 0j], [0 + 0j, 0 + 0j]]],
           [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
            [[0 + 0j, 6.6 + 6.6j], [0 + 0j, 0 + 0j]]]],
          [[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
            [[0 + 0j, 0 + 0j], [7.7 + 7.7j, 0 + 0j]]],
           [[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
            [[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],
        dtype = np.complex64)
    self.diagOp(x, np.complex64, expected_ans)
class DiagPartOpTest(tf.test.TestCase):
  """Tests for tf.diag_part: extract the generalized diagonal of a
  rank-2k tensor by repeating indices across the two halves of the shape.
  """

  def setUp(self):
    # Fixed seed so the random fixtures are reproducible.
    np.random.seed(0)

  def diagPartOp(self, tensor, dtype, expected_ans, use_gpu=False):
    """Evaluates tf.diag_part on `tensor` (cast to `dtype`) and compares
    the result to `expected_ans`.
    """
    with self.test_session(use_gpu=use_gpu):
      tensor = tf.convert_to_tensor(tensor.astype(dtype))
      tf_ans_inv = tf.diag_part(tensor)
      inv_out = tf_ans_inv.eval()
    self.assertAllClose(inv_out, expected_ans)
    self.assertShapeEqual(expected_ans, tf_ans_inv)

  def testRankTwoFloatTensor(self):
    x = np.random.rand(3, 3)
    i = np.arange(3)
    expected_ans = x[i, i]
    self.diagPartOp(x, np.float32, expected_ans)
    self.diagPartOp(x, np.float64, expected_ans)

  def testRankFourFloatTensorUnknownShape(self):
    # NOTE(review): despite the name, this case uses a rank-2 tensor and
    # exercises partially-unknown static shapes.
    x = np.random.rand(3, 3)
    i = np.arange(3)
    expected_ans = x[i, i]
    for shape in None, (None, 3), (3, None):
      with self.test_session(use_gpu=False):
        t = tf.convert_to_tensor(x.astype(np.float32))
        t.set_shape(shape)
        tf_ans = tf.diag_part(t)
        out = tf_ans.eval()
      self.assertAllClose(out, expected_ans)
      self.assertShapeEqual(expected_ans, tf_ans)

  def testRankFourFloatTensor(self):
    x = np.random.rand(2, 3, 2, 3)
    i = np.arange(2)[:, None]
    j = np.arange(3)
    expected_ans = x[i, j, i, j]
    self.diagPartOp(x, np.float32, expected_ans)
    self.diagPartOp(x, np.float64, expected_ans)

  def testRankSixFloatTensor(self):
    x = np.random.rand(2, 2, 2, 2, 2, 2)
    i = np.arange(2)[:, None, None]
    j = np.arange(2)[:, None]
    k = np.arange(2)
    expected_ans = x[i, j, k, i, j, k]
    self.diagPartOp(x, np.float32, expected_ans)
    self.diagPartOp(x, np.float64, expected_ans)

  def testOddRank(self):
    # Odd-rank inputs cannot be split into two matching index halves.
    w = np.random.rand(2)
    x = np.random.rand(2, 2, 2)
    self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
    self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)

  def testUnevenDimensions(self):
    # The two halves of the shape must match dimension-for-dimension.
    w = np.random.rand(2, 5)
    x = np.random.rand(2, 1, 2, 3)
    self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
    self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
class DiagGradOpTest(tf.test.TestCase):
  """Gradient checks for tf.diag."""

  def testDiagGrad(self):
    # Compare the analytic gradient of tf.diag against a numeric estimate
    # for several input ranks and dtypes. (Removed an unused `errors` list.)
    np.random.seed(0)
    shapes = ((3,), (3, 3), (3, 3, 3))
    dtypes = (tf.float32, tf.float64)
    with self.test_session(use_gpu=False):
      for shape in shapes:
        for dtype in dtypes:
          x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
          y = tf.diag(x1)
          error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
                                                 y, y.get_shape().as_list())
          tf.logging.info("error = %f", error)
          self.assertLess(error, 1e-4)
class DiagGradPartOpTest(tf.test.TestCase):
  """Gradient checks for tf.diag_part."""

  def testDiagPartGrad(self):
    # Compare the analytic gradient of tf.diag_part against a numeric
    # estimate for several input ranks and dtypes. (Removed an unused
    # `errors` list.)
    np.random.seed(0)
    shapes = ((3, 3), (3, 3, 3, 3))
    dtypes = (tf.float32, tf.float64)
    with self.test_session(use_gpu=False):
      for shape in shapes:
        for dtype in dtypes:
          x1 = tf.constant(np.random.rand(*shape), dtype=dtype)
          y = tf.diag_part(x1)
          error = tf.test.compute_gradient_error(x1, x1.get_shape().as_list(),
                                                 y, y.get_shape().as_list())
          tf.logging.info("error = %f", error)
          self.assertLess(error, 1e-4)
if __name__ == "__main__":
  # Discover and run every tf.test.TestCase defined in this module.
  tf.test.main()
| |
"""
Tweets lib application file.
Handle fetching and storing of profile and tweet data.
Fetch profile or tweet data from the Twitter API using tweepy. Then insert the
data into the Tweet and Profile tables of the local database (see
models/tweets.py file). Also apply Campaign and Category labels.
That is done here either using the ORM (custom classes to represent tables
in the database) or by building and executing native SQL statements, which will be
several times faster.
For a user interface on fetching and inserting data, see the utils directory.
Steps required to get profiles and their tweets:
1. Start with a Twitter screen name or screen names, read as
a list in the command-line arguments or read from a text file.
2. Get the Profile data for the users and store in the database, either
creating the record or updating if record exists in Profile table.
3. Get tweets from the timeline of the user and store in Tweets table, with a
link back to the Profile record. Repeat for all profiles of interest.
"""
import json
import math
from typing import Union
import tweepy
from sqlobject import SQLObjectNotFound
from sqlobject.dberrors import DuplicateEntryError
from sqlobject.sqlbuilder import Insert, LIKE
from tweepy.error import TweepError
import lib
import lib.text_handling
from lib import database as db
from lib.twitter_api import authentication
# This null character is invisible but appears sometimes such in profile
# description from Twitter and cannot be inserted due to SQLite execute error.
NULL = "\x00"
# TODO Can this be done as an override for all fields when inserting into the
# model? Like for init / update or similar.
def clean(v: str) -> str:
    """Return ``v`` with NUL (0x00) characters removed.

    SQLite inserts fail on strings containing the null character, which
    sometimes appears in profile descriptions fetched from Twitter.
    """
    return v.replace(NULL, "")
def _parse_tweepy_profile(fetchedProfile):
    """
    Flatten a tweepy user object into a dict of Profile-table fields.

    :param tweepy.User fetchedProfile: User data as fetched from Twitter API.

    :return: Simplified user data, as a dict.
    """
    return dict(
        guid=fetchedProfile.id,
        screenName=fetchedProfile.screen_name,
        name=fetchedProfile.name,
        # Descriptions may contain NUL characters, which SQLite rejects.
        description=clean(fetchedProfile.description),
        location=fetchedProfile.location,
        imageUrl=fetchedProfile.profile_image_url_https,
        followersCount=fetchedProfile.followers_count,
        statusesCount=fetchedProfile.statuses_count,
        verified=fetchedProfile.verified,
    )
def _parse_tweepy_tweet(fetchedTweet, profileID):
"""
:param tweepy.Status fetchedTweet: Tweet data as fetched from the Twitter
API.
:param int profileID: ID of the Profile record in the database which
is the tweet author.
:return tweetData: Simplified tweet data, as a dict.
"""
# Assume extended mode (as set on the API request), otherwise fall back to
# standard mode.
try:
text = fetchedTweet.full_text
except AttributeError:
text = fetchedTweet.text
return {
"guid": fetchedTweet.id,
"profileID": profileID,
"createdAt": fetchedTweet.created_at,
"message": text,
"favoriteCount": fetchedTweet.favorite_count,
"retweetCount": fetchedTweet.retweet_count,
"inReplyToTweetGuid": fetchedTweet.in_reply_to_status_id,
"inReplyToProfileGuid": fetchedTweet.in_reply_to_user_id,
}
def _getProfile(APIConn, screenName=None, userID=None):
"""
Get data of one profile from the Twitter API, for a specified user.
Either screenName string or userID integer must be specified, but not both.
:param APIConn: authenticated API connection object.
:param screenName: The name of Twitter user to fetch, as a string.
:param userID: The ID of the Twitter user to fetch, as an integer.
Cannot be set if screenName is also set.
:return tweepy.User: instance for requested Twitter user.
"""
assert (
screenName or userID
), "Expected either screenName (str) or userID (int) to be set."
assert not (
screenName and userID
), "Cannot set both screenName ({screenName}) and userID ({userID}).".format(
screenName=screenName, userID=userID
)
if screenName:
print("Fetching user: @{screenName}".format(screenName=screenName))
params = {"screen_name": screenName}
else:
print("Fetching user ID: {userID}".format(userID=userID))
params = {"user_id": userID}
return APIConn.get_user(**params)
def insertOrUpdateProfile(profile: Union[tweepy.User, dict]):
    """
    Create a Profile record, or refresh the existing record on GUID clash.

    Every fetched value except the GUID is treated as mutable: if a row
    with the same GUID already exists (even under a changed screen name),
    its fields are overwritten with the newly fetched data rather than a
    second row being inserted.

    :param [tweepy.User, dict] profile: Data for a Twitter user.

    :return models.tweets.Profile: Local record for the tweet author.
    """
    profileData = profile if isinstance(profile, dict) else _parse_tweepy_profile(profile)

    try:
        # Optimistically insert, assuming the GUID/screenName are new.
        return db.Profile(**profileData)
    except DuplicateEntryError:
        # Row exists - look it up by GUID and overwrite its other fields.
        guid = profileData.pop("guid")
        existingRec = db.Profile.byGuid(guid)
        existingRec.set(**profileData)
        return existingRec
def insertOrUpdateProfileBatch(screenNames):
    """
    Fetch a batch of Twitter profiles and insert/update them in the db.

    :param screenNames: list of user screen names as strings, to be fetched
        from the Twitter API.

    :return successScreenNames: list of screen names whose Profiles were
        fetched and inserted/updated in the db.
    :return failedScreenNames: list of screen names which could not be
        fetched or could not be inserted/updated in the db.
    """
    APIConn = authentication.getAPIConnection()

    successScreenNames = []
    failedScreenNames = []
    for screenName in screenNames:
        try:
            fetchedProf = _getProfile(APIConn, screenName=screenName)
        except TweepError as e:
            # The profile could be missing or suspended, so we log it
            # and then skip inserting or updating (since we have no data).
            print(
                "Could not fetch user: @{name}. {error}. {msg}".format(
                    name=screenName, error=type(e).__name__, msg=str(e)
                )
            )
            failedScreenNames.append(screenName)
            continue

        try:
            localProf = insertOrUpdateProfile(fetchedProf)
            # Represent log of followers count visually as repeated stars,
            # sidestepping error for log of zero.
            followers = localProf.followersCount
            stars = "*" * (int(math.log10(followers)) if followers else 0)
            print(
                "Inserted/updated user: {name:20} {stars}".format(
                    name="@" + localProf.screenName, stars=stars
                )
            )
            successScreenNames.append(screenName)
        except Exception as e:
            print(
                (
                    "Could not insert/update user: @{name}. {error}. {msg}".format(
                        name=screenName, error=type(e).__name__, msg=str(e)
                    )
                )
            )
            failedScreenNames.append(screenName)

    return successScreenNames, failedScreenNames
def _getTweets(
    APIConn, screenName=None, userID=None, tweetsPerPage=200, pageLimit=1, extended=True
):
    """
    Get tweets of one profile from the Twitter API, for a specified user.

    Either screenName string or userID integer must be specified, but not both.
    The result of (tweetsPerPage)*(pageLimit) indicates the total number
    of tweets requested from the API on calling this function.

    :param APIConn: authenticated API connection object.
    :param screenName: Default None. The name of Twitter user to fetch, as
        a string.
    :param userID: Default None. The ID of the Twitter user to fetch, as an
        integer.
    :param tweetsPerPage: Default 200. Count of tweets to get on a page.
        The API's limit is 200 tweets, but a lower value can be used.
        The `pageLimit` argument can be used to do additional calls
        to get tweets above the 200 limit - see `tweepy.Cursor` method.
    :param pageLimit: Default 1. Number of pages of tweets to get by doing
        a sequence of queries with a cursor. The number of tweets
        on each page is determined by `tweetsPerPage` argument.
    :param extended: If True, get the expanded tweet message instead of the
        truncated form.

    :return list tweetsList: list of tweepy tweet objects for the requested
        user.
    """
    print("Fetching tweets for user: {0}".format(screenName if screenName else userID))
    assert (
        screenName or userID
    ), "Expected either screenName (str) or userID (int) to be set."
    assert not (screenName and userID), "Cannot request both screenName and" " userID."

    params = {"count": tweetsPerPage}
    if extended:
        params["tweet_mode"] = "extended"
    if screenName:
        params["screen_name"] = screenName
    else:
        params["user_id"] = userID

    if pageLimit == 1:
        # Do a simple query without paging.
        tweets = APIConn.user_timeline(**params)
    else:
        tweets = []
        # Send the request parameters to Cursor object, with the page limit.
        for page in tweepy.Cursor(APIConn.user_timeline, **params).pages(pageLimit):
            tweets.extend(page)

    return tweets
def insertOrUpdateTweet(tweet, profileID, writeToDB=True, onlyUpdateEngagements=True):
    """
    Insert or update one record in the Tweet table.

    Attempts to insert a new tweet row; if the GUID already exists locally,
    the existing record is retrieved and updated instead.

    :param [tweepy.Status, dict] tweet: Data for a single Tweet as fetched
        from the Twitter API.
    :param profileID: The ID of the tweet's author, as an integer from
        the Profile ID column in the local db and NOT the Profile GUID.
        This is used to set the Tweet object's foreign key.
    :param writeToDB: Default True. If True, write the fetched tweets
        to local database, otherwise print and discard them.
    :param onlyUpdateEngagements: Default True to only update the favorite
        and retweet count of an existing tweet record. If False, update
        the other fields too.

    :return dict data: Formatted Tweet data.
    :return tweetRec: The inserted/updated Tweet record when writeToDB is
        True, otherwise None.
    """
    tweetData = tweet if isinstance(tweet, dict) else _parse_tweepy_tweet(tweet, profileID)

    # Normalize the timestamp to the configured timezone before storing.
    tweetData["createdAt"] = lib.set_tz(tweetData["createdAt"])

    if not writeToDB:
        return tweetData, None

    try:
        tweetRec = db.Tweet(**tweetData)
    except DuplicateEntryError:
        tweetRec = db.Tweet.byGuid(tweetData.pop("guid"))
        if onlyUpdateEngagements:
            tweetRec.set(
                favoriteCount=tweetData["favoriteCount"],
                retweetCount=tweetData["retweetCount"],
            )
        else:
            tweetRec.set(**tweetData)

    return tweetData, tweetRec
def insertOrUpdateTweetBatch(
    profileRecs,
    tweetsPerProfile=200,
    verbose=False,
    writeToDB=True,
    campaignRec=None,
    onlyUpdateEngagements=True,
):
    """
    Get Twitter tweet data from the Twitter API for a batch of profiles
    and store their tweets in the database.

    The verbose and writeToDB flags can be used together to print tweet
    data which would be inserted into the database without actually inserting
    it. This can be used to preview tweet data without increasing storage or
    using time to do inserts and updates.

    :param profileRecs: list of Profile objects, to create or update
        tweets for. This might be a list from the Profile table which
        has been filtered based on a job schedule, or Profiles which
        match criteria such as high follower count.
    :param tweetsPerProfile: Default 200. Count of tweets to get for each
        profile, as an integer. If this is 200 or less, then page limit is
        left at 1 and the items per page count is reduced. If this is
        more than 200, then the items per page count is left at 200
        and page limit is adjusted to get a number of tweets as the
        next multiple of 200.
            e.g. 550 tweets needs 2 pages to get the first 400 tweets,
            plus a 3rd page to the additional 150 tweets.
            We simplify to get 200*3 = 600 tweets, to keep the count
            consistent on each query.
        Note that even if 200 tweets are requested, the API sometimes returns
        only 199 and the user may have posted fewer than the requested tweets.

        The limit for a single request to the API is 200, therefore any
        number up to 200 has the same rate limit cost. It may be useful to set
        a number here as 200 or less if we want to get through all the users
        quickly, as this takes fewer API queries and fewer db inserts
        or updates. Also, consider that a very low number may lead to deadtime,
        where the script takes a fixed time to get 200 or 1 tweets and
        now that it has processed the 1 requested and the window limit is
        hit, it has no Tweet processing to do while waiting for the next rate
        limited window. Though a low value will mean less storage space
        is required.
    :param verbose: Default False. If True, print the data used to create
        a local Tweet record. This data can be printed regardless of whether
        the data is written to the db record or not.
    :param writeToDB: Default True. If True, write the fetched tweets
        to local database, otherwise print and discard them. This is useful
        when used in combination with verbose flag which prints the data.
    :param campaignRec: Campaign record to assign to the local Tweet records.
        Default None to not assign any Campaign.
    :param onlyUpdateEngagements: Default True to only update the favorite
        and retweet count of the tweet in the local db. If False, update
        other fields too. Those are expected to be static on the Twitter API,
        but if rules change on this repo then it is useful to apply them
        historically on existing Tweet records. This flag only affects
        existing records.

    :return: None
    """
    APIConn = authentication.getAPIConnection()

    if tweetsPerProfile <= 200:
        tweetsPerPage = tweetsPerProfile
        pageLimit = 1
    else:
        tweetsPerPage = 200
        # Round up to get the last page which might have fewer items
        pageLimit = math.ceil(tweetsPerProfile / tweetsPerPage)

    for p in profileRecs:
        try:
            fetchedTweets = _getTweets(
                APIConn, userID=p.guid, tweetsPerPage=tweetsPerPage, pageLimit=pageLimit
            )
        except TweepError as e:
            print(
                "Could not fetch tweets for user: @{screenName}."
                " {type}. {msg}".format(
                    screenName=p.screenName, type=type(e).__name__, msg=str(e)
                )
            )
        else:
            print("User: {0}".format(p.screenName))
            if writeToDB:
                print("Inserting/updating tweets in db...")
            else:
                print("Displaying tweets but not inserting/updating...")
            added = errors = 0
            for f in fetchedTweets:
                try:
                    data, tweetRec = insertOrUpdateTweet(
                        tweet=f,
                        profileID=p.id,
                        writeToDB=writeToDB,
                        onlyUpdateEngagements=onlyUpdateEngagements,
                    )
                    if tweetRec and campaignRec:
                        try:
                            campaignRec.addTweet(tweetRec)
                        except DuplicateEntryError:
                            # Ignore error if Tweet was already assigned.
                            pass
                    if verbose:
                        if tweetRec:
                            tweetRec.prettyPrint()
                        else:
                            # No record was created, so use data dict.
                            m = data["message"]
                            created = data["createdAt"]
                            data["message"] = lib.text_handling.flattenText(m)
                            data["createdAt"] = str(lib.set_tz(created))
                            # TODO: Check if this will raise an error
                            # on unicode symbols in message.
                            print(json.dumps(data, indent=4))
                    added += 1
                except Exception as e:
                    print(
                        "Could not insert/update tweet `{id}` for user"
                        " @{screenName}. {type}. {msg}".format(
                            id=f.id,
                            screenName=p.screenName,
                            type=type(e).__name__,
                            msg=str(e),
                        )
                    )
                    errors += 1
                total = added + errors
                # Print stats on every 10 processed and on the last item.
                if total % 10 == 0 or f == fetchedTweets[-1]:
                    print(
                        "Total: {total:2,d}. Added: {added:2,d}. "
                        "Errors: {errors:2,d}.".format(
                            total=total, added=added, errors=errors
                        )
                    )
def lookupTweetGuids(APIConn, tweetGuids, onlyUpdateEngagements=True):
    """
    Fetch tweets by GUID and store each tweet plus its author in the db.

    The GUID list is processed in batches of 100, the maximum the Twitter
    statuses-lookup endpoint accepts per request (which is also why a
    Cursor cannot be used here - there is only ever one page per request).
    For every returned tweet, the author Profile is inserted or updated
    first, then the Tweet itself, and the stored record is printed.

    Note that tweet_mode='extended' is not available in tweeypy for
    statuses_lookup, though it is used on the other endpoints.
    See https://github.com/tweepy/tweepy/issues/785.

    :param APIConn: authorised tweepy.API connection.
    :param tweetGuids: list of Twitter API tweet GUIDs (ints or strings).
    :param onlyUpdateEngagements: Default True to only refresh the favorite
        and retweet counts on existing local Tweet records. Set False to
        rewrite the other (normally static) fields too, e.g. after the
        repo's processing rules change.

    :return: None
    """
    batchSize = 100
    for start in range(0, len(tweetGuids), batchSize):
        batch = tweetGuids[start:start + batchSize]
        for fetched in APIConn.statuses_lookup(batch):
            # The author must exist locally before the tweet can point at it.
            authorRec = insertOrUpdateProfile(profile=fetched.author)
            _, tweetRec = insertOrUpdateTweet(
                tweet=fetched,
                profileID=authorRec.id,
                onlyUpdateEngagements=onlyUpdateEngagements,
            )
            tweetRec.prettyPrint()
def updateTweetEngagements(APIConn, tweetRecSelect):
    """
    Refresh favorite/retweet counts on local Tweet records from the API.

    The select results are materialized once, their GUIDs are looked up in
    pages of at most 100 (the statuses-lookup endpoint maximum), and each
    returned tweet's engagement counts are written back to the matching
    local record. GUIDs the API does not return (deleted/private/reported
    tweets) are simply left untouched.

    TODO: This could be made more efficient by filtering a set on GUID, or
    by doing a single native-SQL UPDATE after fetching, instead of a
    get-then-set through the ORM per record.

    :param APIConn: API Connection.
    :param tweetRecSelect: SQLObject select results for model.Tweet
        instances, or simply a list of the instances.

    :return: None
    """
    # Materialize once: a single fetch query, and sliced select results
    # do not support .count() so we need the full list to paginate.
    allGuids = [rec.guid for rec in list(tweetRecSelect)]
    pageSize = 100
    for start in range(0, len(allGuids), pageSize):
        page = allGuids[start:start + pageSize]
        for status in APIConn.statuses_lookup(page):
            localTweet = db.Tweet.byGuid(status.id)
            # Remember previous counts purely for the report line below.
            prevFav = localTweet.favoriteCount
            prevRt = localTweet.retweetCount
            localTweet.set(
                favoriteCount=status.favorite_count,
                retweetCount=status.retweet_count,
            )
            print(
                "Updated tweet GUID: {guid}, fav: {fav:3,d} ({oldFav:3,d}),"
                " RT: {rt:3,d} ({oldRt:3,d})".format(
                    guid=status.id,
                    fav=status.favorite_count,
                    oldFav=prevFav,
                    rt=status.retweet_count,
                    oldRt=prevRt,
                )
            )
def assignProfileCategory(categoryName, profileRecs=None, screenNames=None):
    """
    Assign a Category to Profiles, creating the Category if necessary.

    The Category is fetched by name and created when missing. Profiles may
    be given either as records or as screen names; screen names must
    already exist in the db (an SQLObjectNotFound is raised for a missing
    name, after any earlier names in the list were already linked).
    Existing Profile-Category links are counted but not recreated. Both
    Profile inputs may be omitted to just ensure the Category exists.

    :param categoryName: String name of the Category to fetch or create.
    :param profileRecs: Default None. List of db Profile records to link.
    :param screenNames: Default None. List of Profile screen names to link.

    :return tuple of new and existing counts.
        - newCnt: Count of new Profile Category links created.
        - existingCnt: Count of links skipped because they already exist.
    """
    createdLinks = 0
    duplicateLinks = 0

    try:
        categoryRec = db.Category.byName(categoryName)
    except SQLObjectNotFound:
        categoryRec = db.Category(name=categoryName)
        print("Created category: {0}".format(categoryName))

    if profileRecs or screenNames:
        if profileRecs is None:
            # Resolve screen names to Profile records (case-insensitive).
            profileRecs = []
            for name in screenNames:
                rec = db.Profile.select(
                    LIKE(db.Profile.q.screenName, name)
                ).getOne(None)
                if not rec:
                    raise SQLObjectNotFound(
                        "Cannot assign Category since Profile screen name"
                        " is not in db: {0}".format(name)
                    )
                profileRecs.append(rec)

        for rec in profileRecs:
            try:
                categoryRec.addProfile(rec)
            except DuplicateEntryError:
                duplicateLinks += 1
            else:
                createdLinks += 1

    return createdLinks, duplicateLinks
def assignTweetCampaign(campaignRec, tweetRecs=None, tweetGuids=None):
    """
    Assign a Campaign to Tweets using the ORM.

    Links the given Campaign record to each Tweet, skipping links that
    already exist. Tweets may be supplied as records or as GUIDs; GUIDs
    must already exist in the db or SQLObjectNotFound is raised. For large
    batches prefer bulkAssignTweetCampaign. Unlike assignProfileCategory,
    the actual Campaign record is expected here (not a name), since
    Campaign creation is handled by the campaign manager utility because
    of its search query field.

    :param campaignRec: Campaign record to link to the indicated Tweets.
        Both Tweet inputs may be omitted to do nothing.
    :param tweetRecs: Default None. List of db Tweet records to link.
    :param tweetGuids: Default None. List of Tweet GUIDs to link.

    :return newCnt: Count of new Tweet Campaign links created.
    :return existingCnt: Count of links skipped because they already exist.
    """
    createdCount = 0
    duplicateCount = 0

    if not tweetRecs:
        # Resolve GUIDs to Tweet records, failing fast on a missing one.
        tweetRecs = []
        for guid in tweetGuids:
            try:
                rec = db.Tweet.byGuid(guid)
            except SQLObjectNotFound:
                raise SQLObjectNotFound(
                    "Cannot assign Campaign as Tweet"
                    " GUID is not in db: {0}".format(guid)
                )
            tweetRecs.append(rec)

    for rec in tweetRecs:
        try:
            campaignRec.addTweet(rec)
        except DuplicateEntryError:
            duplicateCount += 1
        else:
            createdCount += 1

    return createdCount, duplicateCount
def bulkAssignProfileCategory(categoryID, profileIDs):
    """
    Link one Category to many Profiles with a single INSERT statement.

    Assumes the Category ID and every Profile ID already exist in the db.
    Existing profile_category links are silently skipped by rewriting the
    statement to use SQLite's INSERT OR IGNORE syntax.

    :param categoryID: Category record ID to assign to Profile records.
    :param profileIDs: Iterable of Profile record IDs to link.

    :return SQL: Multi-line SQL statement which was executed.
    """
    rows = [(categoryID, profileID) for profileID in profileIDs]
    statement = Insert(
        "profile_category",
        template=["category_id", "profile_id"],
        valueList=rows,
    )
    # Render through the connection, then make duplicates fail silently.
    sql = db.conn.sqlrepr(statement).replace("INSERT", "INSERT OR IGNORE")
    db.conn.query(sql)

    return sql
def bulkAssignTweetCampaign(campaignID, tweetIDs):
    """
    Link one Campaign to many Tweets with a single INSERT statement.

    Assumes the Campaign ID and every Tweet ID already exist in the db.
    Existing tweet_campaign links fail silently via SQLite's
    INSERT OR IGNORE syntax:
    http://www.sqlite.org/lang_insert.html

    A single statement is used because mass insertion through the ORM is
    inefficient (http://www.sqlobject.org/FAQ.html#how-to-do-mass-insertion).
    The tweet_campaign links are simple and validated at the schema level,
    so native SQL through sqlbuilder is safe; implementation modeled on
    http://www.sqlobject.org/SQLBuilder.html#insert

    :param campaignID: Campaign record ID to assign to Tweet records.
    :param tweetIDs: Iterable of Tweet record IDs to link.

    :return SQL: Multi-line SQL statement which was executed.
    """
    rows = [(campaignID, tweetID) for tweetID in tweetIDs]
    statement = Insert(
        "tweet_campaign",
        template=["campaign_id", "tweet_id"],
        valueList=rows,
    )
    # Render through the connection, then make duplicates fail silently.
    sql = db.conn.sqlrepr(statement).replace("INSERT", "INSERT OR IGNORE")
    db.conn.query(sql)

    return sql
| |
# ***************************************************************************************
# Title: LabAdvComp/parcel
# Author: Joshua S. Miller
# Date: May 26, 2016
# Code version: 0.1.13
# Availability: https://github.com/LabAdvComp/parcel
# ***************************************************************************************
import logging
import math
import os
import pickle
import random
import string
import tempfile
import time
import sys
from intervaltree import Interval, IntervalTree
from gdc_client.parcel.portability import OS_WINDOWS
from gdc_client.parcel.utils import (
get_file_transfer_pbar,
get_percentage_pbar,
md5sum,
mmap_open,
STRIP,
check_file_existence_and_size,
validate_file_md5sum,
)
from gdc_client.parcel.const import SAVE_INTERVAL
if OS_WINDOWS:
WINDOWS = True
from queue import Queue
else:
# if we are running on a posix system, then we will be
# communicating across processes, and will need
# multiprocessing manager
from multiprocessing import Manager
WINDOWS = False
log = logging.getLogger("segment")
class SegmentProducer(object):
    """Produce byte-range download tasks and track their completion.

    The producer owns two interval trees: ``work_pool`` (byte ranges still
    to download) and ``completed`` (byte ranges already on disk). Progress
    is periodically pickled to a state file so an interrupted download can
    resume, and work is handed to consumer processes (threads on Windows)
    through a pair of queues.
    """

    # Number of downloaded bytes to accumulate before the state file is
    # rewritten (see wait_for_completion).
    save_interval = SAVE_INTERVAL

    def __init__(self, download, n_procs):
        """Initialize producer state for *download* with *n_procs* workers.

        :param download: Download object; ``download.size`` must be set.
        :param n_procs: number of consumer processes/threads to feed.
        """
        assert (
            download.size is not None
        ), "Segment producer passed uninitizalied Download!"
        self.download = download
        self.n_procs = n_procs
        self.pbar = None
        self.done = False

        # Initialize producer
        self.load_state()

        # Only set up queues and schedule work if something remains to do.
        if not self.done:
            self._setup_pbar()
            self._setup_queues()
            self._setup_work()
            self.schedule()

    def _setup_pbar(self):
        """Create the file-transfer progress bar for this download."""
        self.pbar = get_file_transfer_pbar(self.download.url, self.download.size)

    def _setup_work(self):
        """Derive the block size and total task count from remaining work."""
        work_size = self.integrate(self.work_pool)
        # Use at least 1 byte per block: integer division of a small
        # work_size by a larger n_procs would otherwise yield a zero block
        # size and a ZeroDivisionError in the task count below.
        self.block_size = max(1, work_size // self.n_procs)
        self.total_tasks = math.ceil(work_size / self.block_size)
        log.debug("Total number of tasks: {0}".format(self.total_tasks))

    def _setup_queues(self):
        """Create the work/completion queues appropriate to the platform."""
        if WINDOWS:
            # Threads share memory; plain queues suffice.
            self.q_work = Queue()
            self.q_complete = Queue()
        else:
            # Worker processes need manager-backed queues.
            manager = Manager()
            self.q_work = manager.Queue()
            self.q_complete = manager.Queue()

    def integrate(self, itree):
        """Return the total number of bytes covered by *itree*'s intervals."""
        return sum(i.end - i.begin for i in itree.items())

    def validate_segment_md5sums(self, path=None):
        """Re-checksum completed segments, dropping corrupt ones.

        Reads segment data from *path* (default: the download path) via
        mmap and removes from ``completed`` any segment whose md5 no longer
        matches, so it will be re-downloaded. Returns True immediately when
        segment checksums are disabled; returns None early if md5s were not
        recorded by the previous download.
        """
        if not self.download.check_segment_md5sums:
            return True
        corrupt_segments = 0
        intervals = sorted(self.completed.items())
        log.debug("Checksumming {0}:".format(self.download.url))
        pbar = get_percentage_pbar(len(intervals))
        with mmap_open(path or self.download.path) as data:
            for interval in pbar(intervals):
                log.debug("Checking segment md5: {0}".format(interval))
                if not interval.data or "md5sum" not in interval.data:
                    log.error(
                        STRIP(
                            """User opted to check segment md5sums on restart.
                        Previous download did not record segment
                        md5sums (--no-segment-md5sums)."""
                        )
                    )
                    return
                chunk = data[interval.begin : interval.end]
                checksum = md5sum(chunk)
                if checksum != interval.data.get("md5sum"):
                    log.debug(
                        "Redownloading corrupt segment {0}, {1}.".format(
                            interval, checksum
                        )
                    )
                    corrupt_segments += 1
                    self.completed.remove(interval)
        if corrupt_segments:
            log.warning("Redownloading {0} corrupt segments.".format(corrupt_segments))

    def recover_intervals(self) -> bool:
        """Recreate list of completed intervals and calculate remaining work pool

        This method checks the status of the following files:
        - state_file (*.parcel)
        - download_file
        - partial temporary file (*.partial)

        Returns:
            bool: True if recovery occured, otherwise False (which indicates that
            the a complete retry of the file download will occur)
        """
        state_file_exists = os.path.isfile(self.download.state_path)
        download_file_exists = os.path.isfile(self.download.path)
        temporary_file_exists = os.path.isfile(self.download.temp_path)

        # If the state file does not exist, treat as first time download
        if not state_file_exists:
            log.debug(
                "State file {0} does not exist. Beginning new download...".format(
                    self.download.state_path
                )
            )
            return False

        log.debug(
            "Found state file {0}, attempting to resume download".format(
                self.download.state_path
            )
        )

        # Attempt to load completed segments from state file
        try:
            with open(self.download.state_path, "rb") as f:
                self.completed = pickle.load(f)
            assert isinstance(
                self.completed, IntervalTree
            ), "Bad save state: {0}".format(self.download.state_path)
        except Exception as e:
            # An error has occured while loading state file.
            # Treat as entire file download and recreate temporary file
            log.error(
                "Unable to resume file state: {0}, will restart entire download".format(
                    str(e)
                )
            )
            return False

        # If the downloaded file exists, validate the downloaded file
        # If the file is not complete and correct, retry the entire download
        # Recreate the temporary file and return
        if download_file_exists:
            log.debug(
                "A file named {0} found, will attempt to validate file".format(
                    self.download.path
                )
            )
            if not self.is_complete(self.download.path):
                log.warning(
                    "Downloaded file is not complete, proceeding to restart entire download"
                )
                return False

            # check md5 sum
            try:
                validate_file_md5sum(self.download, self.download.path)
            except Exception as e:
                log.error(
                    "MD5 check of downloaded file failed due to following reason: {0}. Proceeding to restart entire download".format(
                        str(e)
                    )
                )
                return False

            log.debug("File is complete, will not attempt to re-download file.")
            # downloaded file is correct, set done flag in SegmentProducer
            self.done = True
            self.work_pool = IntervalTree()
            return True

        if not temporary_file_exists:
            log.debug(
                "State file exists but no previous partial file {0} detected. Restarting entire download.".format(
                    self.download.temp_path
                )
            )
            return False

        log.debug(
            "Partial file {0} detected. Validating already downloaded segments".format(
                self.download.temp_path
            )
        )

        # If temporary file exists, means that a previous download of the file
        # failed or was interrupted.
        # Check completed segments md5 sums of each completed segment
        self.validate_segment_md5sums(self.download.temp_path)
        log.debug("Segments checksum validation complete")

        self.size_complete = self.integrate(self.completed)
        log.debug("size complete: {0}".format(self.size_complete))

        # Remove already completed intervals from work_pool
        for interval in self.completed:
            self.work_pool.chop(interval.begin, interval.end)

        return True

    def load_state(self):
        """Initialize the interval trees, then try to resume a prior download."""
        # Establish default intervals
        self.work_pool = IntervalTree([Interval(0, self.download.size)])
        self.completed = IntervalTree()
        self.size_complete = 0
        self.total_tasks = 0

        if not self.recover_intervals():
            # Recovery failed, treat as new download
            self.download.setup_file()
            self.completed = IntervalTree()
            return

        log.debug("State loaded successfully")

    def save_state(self):
        """Atomically persist the ``completed`` interval tree to disk."""
        try:
            # Grab a temp file in the same directory (hopefully avoid
            # cross device links) in order to atomically write our save file
            temp = tempfile.NamedTemporaryFile(
                prefix=".parcel_",
                dir=os.path.abspath(self.download.state_directory),
                delete=False,
            )
            # Write completed state
            pickle.dump(self.completed, temp)
            # Make sure all data is written to disk
            temp.flush()
            os.fsync(temp.fileno())
            temp.close()

            # Rename temp file as our save file, this could fail if
            # the state file and the temp directory are on different devices
            if OS_WINDOWS and os.path.exists(self.download.state_path):
                # If we're on windows, there's not much we can do here
                # except stash the old state file, rename the new one,
                # and back up if there is a problem.
                old_path = os.path.join(
                    tempfile.gettempdir(),
                    "".join(
                        random.choice(string.ascii_lowercase + string.digits)
                        for _ in range(10)
                    ),
                )
                try:
                    # stash the old state file
                    os.rename(self.download.state_path, old_path)
                    # move the new state file into place
                    os.rename(temp.name, self.download.state_path)
                    # if no exception, then delete the old stash
                    os.remove(old_path)
                except Exception as msg:
                    log.error("Unable to write state file: {0}".format(msg))
                    try:
                        os.rename(old_path, self.download.state_path)
                    except Exception:
                        # Best-effort restore of the stashed state file;
                        # narrowed from a bare except so ^C is not swallowed.
                        pass
                    raise
            else:
                # If we're not on windows, then we'll just try to
                # atomically rename the file
                os.rename(temp.name, self.download.state_path)

        except KeyboardInterrupt:
            # The message previously called .format() on a string with no
            # placeholder, so the file name was silently dropped.
            log.warning(
                "Keyboard interrupt. removing temp save file {0}".format(temp.name)
            )
            temp.close()
            os.remove(temp.name)
        except Exception as e:
            log.error("Unable to save state: {0}".format(str(e)))
            raise

    def schedule(self):
        """Drain the work pool into the work queue as block-sized intervals."""
        while True:
            interval = self._get_next_interval()
            log.debug("Returning interval: {0}".format(interval))
            if not interval:
                return
            self.q_work.put(interval)

    def _get_next_interval(self):
        """Pop up to ``block_size`` bytes from the work pool, or return None."""
        intervals = sorted(self.work_pool.items())
        if not intervals:
            return None
        interval = intervals[0]
        start = interval.begin
        end = min(interval.end, start + self.block_size)
        # Remove the scheduled range so it is not issued twice.
        self.work_pool.chop(start, end)
        return Interval(start, end)

    def print_progress(self):
        """Refresh the progress bar with the current completed byte count."""
        if not self.pbar:
            return
        # Clamp in case completed bytes momentarily exceed the bar maximum.
        pbar_value = min(self.pbar.max_value, self.size_complete)

        try:
            self.pbar.update(pbar_value)
        except Exception as e:
            log.debug("Unable to update pbar: {}".format(str(e)))

    def check_file_exists_and_size(self, file_path):
        """Return True if *file_path* exists (and, for regular files, matches size)."""
        if self.download.is_regular_file:
            return check_file_existence_and_size(file_path, self.download.size)
        else:
            log.debug("File is not a regular file, refusing to check size.")
            return os.path.exists(file_path)

    def is_complete(self, file_path):
        """Return True when every byte is accounted for and the file checks out."""
        return self.integrate(
            self.completed
        ) == self.download.size and self.check_file_exists_and_size(file_path)

    def finish_download(self):
        """Signal workers to exit, wait for them, and close the progress bar."""
        # Tell the children there is no more work, each child should
        # pull one NoneType from the queue and exit
        for i in range(self.n_procs):
            self.q_work.put(None)

        # Wait for all the children to exit by checking to make sure
        # that everyone has taken their NoneType from the queue.
        # Otherwise, the segment producer will exit before the
        # children return, causing them to read from a closed queue
        log.debug("Waiting for children to report")
        while not self.q_work.empty():
            time.sleep(0.1)

        # Finish the progressbar
        self.pbar.finish()

    def wait_for_completion(self):
        """Consume completion reports until all tasks are done.

        State is checkpointed roughly every ``save_interval`` bytes and
        once more after the final task; the download is finalized no matter
        how this loop exits.
        """
        try:
            since_save = 0
            num_tasks_completed = 0
            while num_tasks_completed != self.total_tasks:
                while since_save < self.save_interval:
                    interval = self.q_complete.get()

                    # Once a process completes a task (success or failure),
                    # it will return a sentinel value (None) to main process
                    # to indicate that a task was completed
                    if interval is None:
                        num_tasks_completed += 1
                        if num_tasks_completed == self.total_tasks:
                            break
                        continue

                    self.completed.add(interval)

                    # Get bytes downloaded and update progress bar
                    this_size = interval.end - interval.begin
                    self.size_complete += this_size
                    since_save += this_size
                    self.print_progress()

                since_save = 0
                self.save_state()
        finally:
            self.finish_download()
| |
#!/usr/bin/python
import sys
import imp
import os
import xml.dom.minidom
import re
import datetime
import urllib
BeautifulSoup = imp.load_source('BeautifulSoup', '/home/xbmc/.xbmc/addons/script.module.beautifulsoup/lib/BeautifulSoup.py')
common = imp.load_source('common', '/home/xbmc/.xbmc/addons/script.showgrabber/resources/lib/common.py')
search = imp.load_source('search', '/home/xbmc/.xbmc/addons/script.transmission/resources/lib/search.py')
sys.path.append('/home/xbmc/.xbmc/addons/script.transmission/resources/lib/')
import transmissionrpc
import time
SHOWGRABBER_SETTINGS_FILE_PATH="/home/xbmc/.xbmc/userdata/addon_data/script.showgrabber/settings.xml"
TRANSMISSIONXMBC_SETTINGS_FILE_PATH="/home/xbmc/.xbmc/userdata/addon_data/script.transmission/settings.xml"
SETTINGS_FILE_PATH="/home/xbmc/.xbmc/userdata/addon_data/script.postdownloader/settings.xml"
class Settings(object):
    """Aggregate configuration for the post-download script.

    Values come from three XBMC add-on settings files: the postdownloader
    add-on itself, the Transmission add-on and the showgrabber add-on.
    Hard-coded defaults are installed first and kept for any value that is
    absent; if anything at all goes wrong while parsing, whatever has been
    read so far is retained (deliberate best-effort behaviour).
    """

    def __init__(self, settings_file, showgrabber_file, transmissionxbmc_file):
        try:
            # Defaults, used whenever a settings file cannot be parsed.
            # From postdownloader config
            self.MAIL_ENABLED = True
            self.SENDMAIL_DEST = "corvust.xbmc@gmail.com"
            self.DEFAULT_NEW_PATH = "/home/xbmc/Media/New"
            self.TORRENT_DOWNLOAD_PATH = "/home/xbmc/Media/.Downloads"
            # From Transmission config
            self.RPC_HOST = "127.0.0.1"
            self.RPC_PORT = 9091
            self.RPC_USER = "xbmc"
            self.RPC_PASS = "xbmcadmin"
            self.TRUSTEDONLY = True
            # From showgrabber settings
            self.TORRENT_FILE_PATH = "/home/xbmc/shows.xml"
            self.SEARCHER = "The Pirate Bay"

            # Postdownloader add-on settings.
            for node in self._setting_nodes(settings_file):
                key = node.attributes['id'].value
                if key == 'mail_enabled':
                    if str(node.attributes['value'].value).lower() != 'true':
                        self.MAIL_ENABLED = False
                elif key == 'mail_dest':
                    self.SENDMAIL_DEST = node.attributes['value'].value
                elif key == 'new_path':
                    self.DEFAULT_NEW_PATH = node.attributes['value'].value
                elif key == 'download_path':
                    self.TORRENT_DOWNLOAD_PATH = node.attributes['value'].value

            # Transmission add-on settings.
            for node in self._setting_nodes(transmissionxbmc_file):
                key = node.attributes['id'].value
                if key == 'rpc_host':
                    self.RPC_HOST = node.attributes['value'].value
                elif key == 'rpc_port':
                    self.RPC_PORT = node.attributes['value'].value
                elif key == 'rpc_user':
                    self.RPC_USER = node.attributes['value'].value
                elif key == 'rpc_password':
                    self.RPC_PASS = node.attributes['value'].value
                elif key == 'trusted_uploaders':
                    if str(node.attributes['value'].value).lower() != 'true':
                        self.TRUSTEDONLY = False

            # Showgrabber add-on settings.
            for node in self._setting_nodes(showgrabber_file):
                key = node.attributes['id'].value
                if key == 'search_provider':
                    self.SEARCHER = node.attributes['value'].value
                elif key == 'file_path':
                    self.TORRENT_FILE_PATH = node.attributes['value'].value
        except:
            # Deliberate best-effort: any parse problem leaves the defaults
            # (and whatever was already read) in place.
            pass

    @staticmethod
    def _setting_nodes(path):
        """Return the <setting> elements of the XML settings file at *path*."""
        return xml.dom.minidom.parse(path).getElementsByTagName('setting')
def sendMail(to_address,subject_text,body_text):
sendmail_location = "/usr/sbin/sendmail" # sendmail location
p = os.popen("%s -t" % sendmail_location, "w")
p.write("To: %s\n" % to_address)
p.write("Subject: %s\n" % subject_text)
p.write("\n") # blank line separating headers from body
p.write("%s" % body_text)
status = p.close()
if status != 0:
print "Sendmail exit status", status
def mover(settings, allshows, tid = None):
''' The mover function is called by transmission when download is complete.
It is responsible for extracting the proper video files from the set
of files downloaded by the torrent, and placing them in the correct
destination directory.
'''
import shutil
import traceback
try:
if tid == None:
torrent_id = os.environ.get('TR_TORRENT_ID')
else:
torrent_id = tid
print 'Torrent ID: %s' % str(torrent_id)
download_path = settings.TORRENT_DOWNLOAD_PATH
default_video_output_path = os.path.join(settings.DEFAULT_NEW_PATH,"Video")
default_audio_output_path = os.path.join(settings.DEFAULT_NEW_PATH,"Audio")
if not os.path.exists(default_video_output_path):
os.makedirs(default_video_output_path)
os.makedirs(default_audio_output_path)
video_extensions = ['mp4', 'mov', 'mkv', 'avi']
audio_extensions = ['mp3']
tc = transmissionrpc.Client(settings.RPC_HOST, port=settings.RPC_PORT, user=settings.RPC_USER, password=settings.RPC_PASS)
files_dict = tc.get_files()
torrent_list = tc.get_torrents()
if torrent_id != None:
id_key = int(torrent_id)
if id_key in files_dict.keys():
video_files = []
audio_files = []
for file_key in files_dict[id_key].keys():
file_ext = (files_dict[id_key][file_key]['name']).rsplit('.',1)[1]
if file_ext in video_extensions and files_dict[id_key][file_key]['name'] not in video_files:
video_files.append(files_dict[id_key][file_key]['name'])
if file_ext in audio_extensions and files_dict[id_key][file_key]['name'] not in audio_files:
audio_files.append(files_dict[id_key][file_key]['name'])
for tfile in audio_files:
try:
shutil.copy(os.path.join(download_path,tfile),default_audio_output_path)
except:
pass
for tfile in video_files:
print ' --> %s' % tfile
matches = []
foundShow = False
for show in allshows.getShows():
if show.enabled:
terms = show.filename.lower().replace('.',' ').split(' ')
query_str = ''
for term in terms:
if term != '':
if query_str != '':
query_str = query_str+'[ .]+'
query_str = query_str + re.escape(term)
query_str = query_str + '.*'
match = re.search(query_str,os.path.basename(tfile).lower())
if match and os.path.basename(tfile).lower().find('sample') == -1:
print ' MATCHES'
foundShow = True
matches.append(show)
if not foundShow:
print 'No match found for torrent id %d' % id_key
try:
shutil.copy(os.path.join(download_path,tfile),default_video_output_path)
except:
pass
else:
# All of the shows in 'matching' appear in the video filename. It stands to reason
#tThat the longest show name will be the best (kinda true right?)
bestmatch = matches[0]
for show in matches:
if len(show.filename) > len(bestmatch.filename):
bestmatch = show
dest_dir = os.path.join(bestmatch.path,'Season %d' % bestmatch.season)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if os.path.exists(dest_dir):
if not os.path.isfile(os.path.join(dest_dir,tfile)):
shutil.copy(os.path.join(download_path,tfile),dest_dir)
# Remove the torrent and trash the data - actually don't, so that we continue to seed and be a good netizen.
# tc.remove_torrent(id_key,True)
if settings.MAIL_ENABLED:
sendMail(settings.SENDMAIL_DEST,'%s - new episode available' % bestmatch.name,'A new episode of %s is available for playback in \
%s/Season %d: %s' % (bestmatch.name, bestmatch.path, bestmatch.season,os.path.basename(tfile)))
else:
print 'no id match:'
for key in files_dict.keys():
print '--> %d' % key
now = time.time()
oneWeek = 60*60*24*7
for id in files_dict.keys():
t = tc.get_torrent(id)
doneDate = t.__getattr__('doneDate')
if doneDate > 0 and doneDate < (now - oneWeek):
print 'Found an old torrent (id = %d) - removing.' % int(id)
tc.remove_torrent(id,True)
except Exception:
exc_details = traceback.format_exc()
print '%s' % exc_details
if settings.MAIL_ENABLED:
sendMail('SENDMAIL_DEST','An error has occurred',exc_details)
def scraper(settings, allshows):
import socket
socket.setdefaulttimeout(15)
# Create the client conneciton to transmission
tc = transmissionrpc.Client(settings.RPC_HOST, port=settings.RPC_PORT, user=settings.RPC_USER, password=settings.RPC_PASS)
activeTorrents = tc.list().values()
torrentFiles = []
for torrent in activeTorrents:
files_dict = tc.get_files(torrent.id)
for id_key in files_dict.keys():
for file_key in files_dict[id_key].keys():
torrentFiles.append(files_dict[id_key][file_key]['name'].lower())
for show in allshows.getShows():
if show.enabled:
try:
dlTorrent = None
# Figure out what the next episode we need is - only download 1 episode per sweep.
dir_path = os.path.join(show.path,'Season %d' % show.season)
if not os.path.exists(show.path):
os.makedirs(show.path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
lastEpisode = 0
for f in os.listdir(dir_path):
terms = show.filename.lower().replace('.',' ').split(' ')
query_str = ''
for term in terms:
if term != '':
if query_str != '':
query_str = query_str+'[ .]+'
query_str = query_str + re.escape(term)
query_str = query_str + '.*'
match = re.search(query_str,f.lower())
if match:
match = re.search("s[0-9]+e[0-9]+",f.lower())
if match:
thisEpisode = int(match.group().split('e')[1])
if thisEpisode > lastEpisode:
lastEpisode = thisEpisode
else:
match = re.search("[ .][0-9]{3,4}[ .]",f)
if match:
i = len(match.group())-3
thisEpisode = int(match.group()[i:i+2])
if thisEpisode > lastEpisode:
lastEpisode = thisEpisode
nextEpisode = lastEpisode + 1
if nextEpisode < show.minepisode:
nextEpisode = show.minepisode
if show.season < 10:
season_str = '0' + str(show.season)
else:
season_str = str(show.season)
if nextEpisode < 10:
episode_str = '0' + str(nextEpisode)
else:
episode_str = str(nextEpisode)
targetName = show.filename + '.s'+season_str+'e'+episode_str
print 'Looking for %s' % targetName
engine = None
if settings.SEARCHER == 'The Pirate Bay':
engine = search.TPB
if settings.SEARCHER == 'Mininova':
engine = search.Mininova
if settings.SEARCHER == 'Kickass':
engine = search.Kickass
results = engine().search(urllib.quote(targetName),{'trusted_uploaders':settings.TRUSTEDONLY})
if len(results) > 0:
dlTorrent = results[0];
found = False
if dlTorrent is not None :
for tfile in torrentFiles:
if tfile.lower().find(targetName.lower()) != -1:
found = True
if not found:
print 'Adding torrent: %s' % dlTorrent['url']
tc.add_uri(dlTorrent['url'])
except Exception as details:
print 'An error occured: %s' % details
time.sleep(10)
if __name__ == '__main__':
    # Defaults to 'move' mode
    # Load merged configuration from the three add-on settings files.
    settings = Settings(SETTINGS_FILE_PATH,SHOWGRABBER_SETTINGS_FILE_PATH,TRANSMISSIONXMBC_SETTINGS_FILE_PATH)
    config_filename = settings.TORRENT_FILE_PATH
    # Mode comes from argv[1]; when absent (the transmission-hook case)
    # fall back to 'move'.
    if len(sys.argv) < 2:
        mode = 'move'
    else:
        mode = sys.argv[1]
    allshows = common.ShowList(config_filename)
    if mode.lower() == 'scrape':
        scraper(settings,allshows)
    elif mode.lower() == 'move':
        # Optional argv[2] is an explicit torrent id to move.
        if len(sys.argv) > 2:
            mover(settings,allshows,int(sys.argv[2]))
        else:
            mover(settings,allshows)
    else:
        print 'Invalid mode <%s>. Must enter one of scrape or move' % mode
        exit(1)
    exit(0)
| |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import numpy as np
from scipy import linalg
from copy import deepcopy
import re
from .cov import read_cov, _get_whitener_data
from .io.constants import FIFF
from .io.pick import pick_types
from .io.proj import make_projector
from .bem import _fit_sphere
from .transforms import (_print_coord_trans, _coord_frame_name,
apply_trans, invert_transform)
from .forward._make_forward import (_get_mri_head_t, _setup_bem,
_prep_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .externals.six import string_types
from .surface import (_bem_find_surface, transform_surface_to,
_normalize_vectors, _get_ico_surface,
_bem_explain_surface, _compute_nearest)
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
from .fixes import partial
from .utils import logger, verbose, _time_mask
class Dipole(object):
"""Dipole class
Used to store positions, orientations, amplitudes, times, goodness of fit
of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
or certain inverse solvers.
Parameters
----------
times : array, shape (n_dipoles,)
The time instants at which each dipole was fitted (sec).
pos : array, shape (n_dipoles, 3)
The dipoles positions (m).
amplitude : array, shape (n_dipoles,)
The amplitude of the dipoles (nAm).
ori : array, shape (n_dipoles, 3)
The dipole orientations (normalized to unit length).
gof : array, shape (n_dipoles,)
The goodness of fit.
name : str | None
Name of the dipole.
"""
    def __init__(self, times, pos, amplitude, ori, gof, name=None):
        """Store the fitted dipole data (see class docstring for units)."""
        self.times = times          # fit time instants (sec)
        self.pos = pos              # positions (m), shape (n_dipoles, 3)
        self.amplitude = amplitude  # amplitudes (nAm)
        self.ori = ori              # unit-length orientations
        self.gof = gof              # goodness of fit
        self.name = name            # optional dipole name
def __repr__(self):
s = "n_times : %s" % len(self.times)
s += ", tmin : %s" % np.min(self.times)
s += ", tmax : %s" % np.max(self.times)
return "<Dipole | %s>" % s
def save(self, fname):
"""Save dipole in a .dip file
Parameters
----------
fname : str
The name of the .dip file.
"""
fmt = " %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
with open(fname, 'wb') as fid:
fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
fid.write('# begin end X (mm) Y (mm) Z (mm)'
' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%\n'
.encode('utf-8'))
t = self.times[:, np.newaxis] * 1000.
gof = self.gof[:, np.newaxis]
amp = 1e9 * self.amplitude[:, np.newaxis]
out = np.concatenate((t, t, self.pos / 1e-3, amp,
self.ori * amp, gof), axis=-1)
np.savetxt(fid, out, fmt=fmt)
if self.name is not None:
fid.write(('## Name "%s dipoles" Style "Dipoles"'
% self.name).encode('utf-8'))
def crop(self, tmin=None, tmax=None):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
"""
mask = _time_mask(self.times, tmin, tmax)
for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
setattr(self, attr, getattr(self, attr)[mask])
def copy(self):
"""Copy the Dipoles object
Returns
-------
dip : instance of Dipole
The copied dipole instance.
"""
return deepcopy(self)
@verbose
def plot_locations(self, trans, subject, subjects_dir=None,
bgcolor=(1, 1, 1), opacity=0.3,
brain_color=(0.7, 0.7, 0.7), mesh_color=(1, 1, 0),
fig_name=None, fig_size=(600, 600), mode='cone',
scale_factor=0.1e-1, colors=None, verbose=None):
"""Plot dipole locations as arrows
Parameters
----------
trans : dict
The mri to head trans.
subject : str
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : None | str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
The default is None.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
mesh_color : tuple of length 3
Mesh color.
fig_name : tuple of length 2
Mayavi figure name.
fig_size : tuple of length 2
Mayavi figure size.
mode : str
Should be ``'cone'`` or ``'sphere'`` to specify how the
dipoles should be shown.
scale_factor : float
The scaling applied to amplitudes for the plot.
colors: list of colors | None
Color to plot with each dipole. If None defaults colors are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
from .viz import plot_dipole_locations
dipoles = []
for t in self.times:
dipoles.append(self.copy())
dipoles[-1].crop(t, t)
return plot_dipole_locations(
dipoles, trans, subject, subjects_dir, bgcolor, opacity,
brain_color, mesh_color, fig_name, fig_size, mode, scale_factor,
colors)
def plot_amplitudes(self, color='k', show=True):
"""Plot the dipole amplitudes as a function of time
Parameters
----------
color: matplotlib Color
Color to use for the trace.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
"""
from .viz import plot_dipole_amplitudes
return plot_dipole_amplitudes([self], [color], show)
def __getitem__(self, idx_slice):
"""Handle indexing"""
if isinstance(idx_slice, int): # make sure attributes stay 2d
idx_slice = [idx_slice]
selected_times = self.times[idx_slice].copy()
selected_pos = self.pos[idx_slice, :].copy()
selected_amplitude = self.amplitude[idx_slice].copy()
selected_ori = self.ori[idx_slice, :].copy()
selected_gof = self.gof[idx_slice].copy()
selected_name = self.name
new_dipole = Dipole(selected_times, selected_pos,
selected_amplitude, selected_ori,
selected_gof, selected_name)
return new_dipole
def __len__(self):
"""Handle len function"""
return self.pos.shape[0]
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
    """Read .dip file from Neuromag/xfit or MNE
    Parameters
    ----------
    fname : str
        The name of the .dip file.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    dipole : instance of Dipole
        The dipole.
    """
    # Neuromag/xfit files use '%' as the comment marker while MNE uses '#';
    # try one then the other. np.loadtxt raises ValueError when the comment
    # style does not match, so catch only that (a bare except would also
    # hide genuine errors such as a missing file).
    try:
        data = np.loadtxt(fname, comments='%')
    except ValueError:
        data = np.loadtxt(fname, comments='#')  # handle 2 types of comments...
    name = None
    with open(fname, 'r') as fid:
        for line in fid.readlines():
            if line.startswith('##') or line.startswith('%%'):
                m = re.search('Name "(.*) dipoles"', line)
                if m:
                    name = m.group(1)
                    break
    # a single dipole is loaded as a 1D row; promote to 2D for uniformity
    if data.ndim == 1:
        data = data[None, :]
    logger.info("%d dipole(s) found" % len(data))
    times = data[:, 0] / 1000.
    pos = 1e-3 * data[:, 2:5]  # put data in meters
    amplitude = data[:, 5]
    norm = amplitude.copy()
    amplitude /= 1e9
    norm[norm == 0] = 1  # avoid division by zero for zero-amplitude dipoles
    ori = data[:, 6:9] / norm[:, np.newaxis]
    gof = data[:, 9]
    return Dipole(times, pos, amplitude, ori, gof, name)
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
    """Compute (projected, whitened) forward fields for candidate positions.

    Returns the whitened forward matrix, the raw forward matrix, and the
    per-component column scales (currently all ones).
    """
    fields = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
    # Stack the MEG/EEG pieces along the channel axis.
    B_orig = np.concatenate(fields, axis=1)
    # Apply projection and whiten (cov has projections already)
    B = np.dot(B_orig, whitener.T)
    # Column normalization would not change the fit, so it is disabled;
    # unit scales are returned so _fit_Q can still "undo" it uniformly.
    scales = np.ones(3)
    return B, B_orig, scales
def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
    """Build an initial guess grid inside a sphere or a BEM surface."""
    if isinstance(surf_or_rad, dict):
        # A BEM surface was handed in directly.
        surf = surf_or_rad
        logger.info('Guess surface (%s) is in %s coordinates'
                    % (_bem_explain_surface(surf['id']),
                       _coord_frame_name(surf['coord_frame'])))
    else:
        # Sphere model: tessellate an icosahedral surface at that radius.
        rad = surf_or_rad[0]
        logger.info('Making a spherical guess space with radius %7.1f mm...'
                    % (1000 * rad))
        surf = _get_ico_surface(3)
        _normalize_vectors(surf['rr'])
        surf['rr'] *= rad
        surf['rr'] += r0
    logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
    src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
                                    do_neighbors=False, n_jobs=n_jobs)
    # Reduce to the fields the fitting code actually needs, with the used
    # vertices renumbered contiguously.
    used = src['vertno']
    simple = dict(rr=src['rr'][used], nn=src['nn'][used],
                  nuse=src['nuse'], coord_frame=src['coord_frame'],
                  vertno=np.arange(src['nuse']))
    return SourceSpaces([simple])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,
              constraint=None):
    """Calculate the residual sum of squares"""
    if fwd_svd is not None:
        # Precomputed SVD from the guess grid.
        uu, sing, vv = fwd_svd
    else:
        # Free optimization: penalize points outside the allowed region
        # with a value that grows with the violation distance.
        dist = constraint(rd)
        if dist <= 0:
            return 1. - 100 * dist
        fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
        uu, sing, vv = linalg.svd(fwd, full_matrices=False)
    gof = _dipole_gof(uu, sing, vv, B, B2)[0]
    return 1 - gof
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD"""
ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
one = np.dot(vv[:ncomp], B)
Bm2 = np.sum(one ** 2)
return Bm2 / B2, one
def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd):
    """Estimate the dipole moment Q once the location rd is known."""
    fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
                                             rd[np.newaxis, :])
    uu, sing, vv = linalg.svd(fwd, full_matrices=False)
    gof, one = _dipole_gof(uu, sing, vv, B, B2)
    ncomp = len(one)
    # Counteract the effect of column normalization (scales are 1 for now).
    weights = (one / sing[:ncomp])[:, np.newaxis]
    Q = scales[0] * np.sum(uu.T[:ncomp] * weights, axis=0)
    # Residual: projected measurement minus projected modeled dipole field.
    proj_data = np.dot(proj_op, B_orig)
    proj_model = np.dot(np.dot(Q, fwd_orig), proj_op.T)
    B_residual = proj_data - proj_model
    return Q, gof, B_residual
def _fit_dipoles(data, times, rrs, guess_fwd_svd, fwd_data, whitener,
                 proj_op, n_jobs):
    """Fit one dipole per time point of the whitened, projected data."""
    # Imported here and passed down so the worker function does not need
    # to import scipy itself.
    from scipy.optimize import fmin_cobyla
    parallel, p_fun, _ = parallel_func(_fit_dipole, n_jobs)
    # parallel over time points
    fits = parallel(p_fun(B, t, rrs, guess_fwd_svd, fwd_data, whitener,
                          proj_op, fmin_cobyla)
                    for B, t in zip(data.T, times))
    pos = np.array([fit[0] for fit in fits])
    amp = np.array([fit[1] for fit in fits])
    ori = np.array([fit[2] for fit in fits])
    gof = np.array([fit[3] for fit in fits]) * 100  # convert to percentage
    residual = np.array([fit[4] for fit in fits]).T
    return pos, amp, ori, gof, residual
def _fit_dipole(B_orig, t, rrs, guess_fwd_svd, fwd_data, whitener, proj_op,
                fmin_cobyla):
    """Fit a single dipole to one time point of data.

    Returns
    -------
    pos, amp, ori, gof, residual : tuple
        Always a 5-tuple; _fit_dipoles unpacks all five elements.
    """
    logger.info('---- Fitting : %7.1f ms' % (1000 * t,))
    B = np.dot(whitener, B_orig)
    # make constraint function to keep the solver within the inner skull
    if isinstance(fwd_data['inner_skull'], dict):  # bem
        surf = fwd_data['inner_skull']
        def constraint(rd):
            # Negative (infeasible) value when outside the surface,
            # proportional to the distance to it.
            if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
                dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],
                                        return_dists=True)[1][0]
                return -dist
            else:
                return 1.
    else:  # sphere
        R, r0 = fwd_data['inner_skull']
        R_adj = R - 1e-5  # to be sure we don't hit the innermost surf
        def constraint(rd):
            return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
    # Find a good starting point (find_best_guess in C)
    B2 = np.dot(B, B)
    if B2 == 0:
        logger.warning('Zero field found for time %s' % t)
        # BUG FIX: the original returned only 4 values here, but callers
        # (_fit_dipoles) unpack 5 per time point. Return the projected
        # measurement as the residual, matching the normal path with Q = 0.
        return np.zeros(3), 0, np.zeros(3), 0, np.dot(proj_op, B_orig)
    x0 = rrs[np.argmin([_fit_eval(rrs[fi][np.newaxis, :], B, B2, fwd_svd)
                        for fi, fwd_svd in enumerate(guess_fwd_svd)])]
    fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener,
                  constraint=constraint)
    # Tested minimizers:
    # Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
    # Several were similar, but COBYLA won for having a handy constraint
    # function we can use to ensure we stay inside the inner skull /
    # smallest sphere
    rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
                           rhobeg=5e-2, rhoend=1e-4, disp=False)
    # Compute the dipole moment at the final point
    Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
                              rd_final)
    amp = np.sqrt(np.sum(Q * Q))
    norm = 1 if amp == 0 else amp
    ori = Q / norm
    return rd_final, amp, ori, gof, residual
@verbose
def fit_dipole(evoked, cov, bem, trans=None, n_jobs=1, verbose=None):
    """Fit a dipole
    Parameters
    ----------
    evoked : instance of Evoked
        The dataset to fit.
    cov : str | instance of Covariance
        The noise covariance.
    bem : str | dict
        The BEM filename (str) or a loaded sphere model (dict).
    trans : str | None
        The head<->MRI transform filename. Must be provided unless BEM
        is a sphere model.
    n_jobs : int
        Number of jobs to run in parallel (used in field computation
        and fitting).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    dip : instance of Dipole
        The dipole fits.
    residual : ndarray, shape (n_meeg_channels, n_times)
        The good M-EEG data channels with the fitted dipolar activity
        removed.
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # This could eventually be adapted to work with other inputs, these
    # are what is needed:
    data = evoked.data
    info = evoked.info
    times = evoked.times.copy()
    comment = evoked.comment
    # Figure out our inputs
    neeg = len(pick_types(info, meg=False, eeg=True, exclude=[]))
    if isinstance(bem, string_types):
        logger.info('BEM : %s' % bem)
    if trans is not None:
        logger.info('MRI transform : %s' % trans)
        mri_head_t, trans = _get_mri_head_t(trans)
    else:
        # No transform given: assume head and MRI frames coincide.
        mri_head_t = {'from': FIFF.FIFFV_COORD_HEAD,
                      'to': FIFF.FIFFV_COORD_MRI, 'trans': np.eye(4)}
    bem = _setup_bem(bem, bem, neeg, mri_head_t)
    if not bem['is_sphere']:
        if trans is None:
            raise ValueError('mri must not be None if BEM is provided')
        # Find the best-fitting sphere (used for the initial guess grid)
        inner_skull = _bem_find_surface(bem, 'inner_skull')
        inner_skull = inner_skull.copy()
        R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
        r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
        logger.info('Grid origin : '
                    '%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
    else:
        r0 = bem['r0']
        logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm'
                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2]))
        if 'layers' in bem:
            R = bem['layers'][0]['rad']
        else:
            # Unbounded sphere model (no layers): no radius constraint.
            R = np.inf
        inner_skull = [R, r0]
    # Guess grid is constructed in MRI coordinates, so map the origin back.
    r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
                         r0[np.newaxis, :])[0]
    # Eventually these could be parameters, but they are just used for
    # the initial grid anyway
    guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf
    guess_mindist = 0.005 # 0.01
    guess_exclude = 0.02 # 0.02
    accurate = False # can be made an option later (shouldn't make big diff)
    logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
    if guess_mindist > 0.0:
        logger.info('Guess mindist : %6.1f mm' % (1000 * guess_mindist,))
    if guess_exclude > 0:
        logger.info('Guess exclude : %6.1f mm' % (1000 * guess_exclude,))
    logger.info('Using %s MEG coil definitions.'
                % ("accurate" if accurate else "standard"))
    if isinstance(cov, string_types):
        logger.info('Noise covariance : %s' % (cov,))
        cov = read_cov(cov, verbose=False)
    logger.info('')
    _print_coord_trans(mri_head_t)
    _print_coord_trans(info['dev_head_t'])
    logger.info('%d bad channels total' % len(info['bads']))
    # Forward model setup (setup_forward_model from setup.c)
    megcoils, compcoils, eegels, megnames, eegnames, meg_info = \
        _prep_channels(info, exclude='bads', accurate=accurate)
    # Whitener for the data
    logger.info('Decomposing the sensor noise covariance matrix...')
    picks = pick_types(info, meg=True, eeg=True, exclude='bads')
    # In case we want to more closely match MNE-C for debugging:
    # from .io.pick import pick_info
    # from .cov import prepare_noise_cov
    # info_nb = pick_info(info, picks)
    # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
    # nzero = (cov['eig'] > 0)
    # n_chan = len(info_nb['ch_names'])
    # whitener = np.zeros((n_chan, n_chan), dtype=np.float)
    # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
    # whitener = np.dot(whitener, cov['eigvec'])
    whitener = _get_whitener_data(info, cov, picks, verbose=False)
    # Proceed to computing the fits (make_guess_data)
    logger.info('\n---- Computing the forward solution for the guesses...')
    src = _make_guesses(inner_skull, r0_mri,
                        guess_grid, guess_exclude, guess_mindist,
                        n_jobs=n_jobs)[0]
    # Move the guess grid (and BEM surface, if any) into head coordinates.
    if isinstance(inner_skull, dict):
        transform_surface_to(inner_skull, 'head', mri_head_t)
    transform_surface_to(src, 'head', mri_head_t)
    # C code computes guesses using a sphere model for speed, don't bother here
    logger.info('Go through all guess source locations...')
    fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
                    ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
                    inner_skull=inner_skull)
    _prep_field_computation(src['rr'], bem, fwd_data, n_jobs, verbose=False)
    guess_fwd = _dipole_forwards(fwd_data, whitener, src['rr'],
                                 n_jobs=n_jobs)[0]
    # decompose ahead of time
    guess_fwd_svd = [linalg.svd(fwd, full_matrices=False)
                     for fwd in np.array_split(guess_fwd, len(src['rr']))]
    logger.info('[done %d sources]' % src['nuse'])
    # Do actual fits
    data = data[picks]
    ch_names = [info['ch_names'][p] for p in picks]
    proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
    out = _fit_dipoles(data, times, src['rr'], guess_fwd_svd, fwd_data,
                       whitener, proj_op, n_jobs)
    dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
    residual = out[4]
    logger.info('%d dipoles fitted' % len(dipoles.times))
    return dipoles, residual
| |
"""The tests for the Cast Media player platform."""
# pylint: disable=protected-access
import json
from typing import Optional
from uuid import UUID
import attr
import pytest
from homeassistant.components import tts
from homeassistant.components.cast import media_player as cast
from homeassistant.components.cast.media_player import ChromecastInfo
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from tests.async_mock import ANY, AsyncMock, MagicMock, Mock, patch
from tests.common import MockConfigEntry, assert_setup_component
from tests.components.media_player import common
@pytest.fixture()
def mz_mock():
    """Provide a mocked pychromecast MultizoneManager."""
    multizone_manager = MagicMock()
    return multizone_manager
@pytest.fixture()
def quick_play_mock():
    """Provide a mocked pychromecast quick_play helper."""
    quick_play = MagicMock()
    return quick_play
@pytest.fixture(autouse=True)
def cast_mock(mz_mock, quick_play_mock):
    """Mock pychromecast."""
    # Fake pychromecast module whose discovery call returns a
    # (browser, listener)-style tuple.
    pycast_mock = MagicMock()
    pycast_mock.start_discovery.return_value = (None, Mock())
    # Canned DIAL device status for any queried device.
    dial_mock = MagicMock(name="XXX")
    dial_mock.get_device_status.return_value.uuid = "fake_uuid"
    dial_mock.get_device_status.return_value.manufacturer = "fake_manufacturer"
    dial_mock.get_device_status.return_value.model_name = "fake_model_name"
    dial_mock.get_device_status.return_value.friendly_name = "fake_friendly_name"
    # Patch every import site of pychromecast plus the multizone manager,
    # zeroconf instance getter and quick_play helper used by the platform.
    with patch(
        "homeassistant.components.cast.media_player.pychromecast", pycast_mock
    ), patch(
        "homeassistant.components.cast.discovery.pychromecast", pycast_mock
    ), patch(
        "homeassistant.components.cast.media_player.MultizoneManager",
        return_value=mz_mock,
    ), patch(
        "homeassistant.components.cast.media_player.zeroconf.async_get_instance",
        AsyncMock(),
    ), patch(
        "homeassistant.components.cast.media_player.quick_play",
        quick_play_mock,
    ):
        yield
# pylint: disable=invalid-name
# Stable UUIDs reused across the tests: two individual casts and one group.
FakeUUID = UUID("57355bce-9364-4aa6-ac1e-eb849dccf9e2")
FakeUUID2 = UUID("57355bce-9364-4aa6-ac1e-eb849dccf9e4")
FakeGroupUUID = UUID("57355bce-9364-4aa6-ac1e-eb849dccf9e3")
def get_fake_chromecast(info: ChromecastInfo):
    """Generate a Fake Chromecast object with the specified arguments."""
    # Mirror the network identity of the supplied info on the mock and
    # start with no media status, as a freshly connected cast would.
    chromecast = MagicMock(host=info.host, port=info.port, uuid=info.uuid)
    chromecast.media_controller.status = None
    return chromecast
def get_fake_chromecast_info(
    host="192.168.178.42", port=8009, uuid: Optional[UUID] = FakeUUID
):
    """Generate a Fake ChromecastInfo with the specified arguments."""
    # Only the network identity varies; name and services are fixed.
    info = ChromecastInfo(
        uuid=uuid,
        host=host,
        port=port,
        friendly_name="Speaker",
        services={"the-service"},
    )
    return info
def get_fake_zconf(host="192.168.178.42", port=8009):
    """Generate a Fake Zeroconf object with the specified arguments."""
    # Any service lookup resolves to a single record with this host/port.
    service_info = MagicMock(port=port)
    service_info.parsed_addresses = MagicMock(return_value=[host])
    zconf = MagicMock()
    zconf.get_service_info.return_value = service_info
    return zconf
async def async_setup_cast(hass, config=None):
    """Set up the cast platform."""
    if config is None:
        config = {}
    # Patch entity addition so tests can assert how often entities were
    # scheduled without actually creating them.
    with patch(
        "homeassistant.helpers.entity_platform.EntityPlatform._async_schedule_add_entities"
    ) as add_entities:
        MockConfigEntry(domain="cast").add_to_hass(hass)
        await async_setup_component(hass, "cast", {"cast": {"media_player": config}})
        await hass.async_block_till_done()
    return add_entities
async def async_setup_cast_internal_discovery(hass, config=None):
    """Set up the cast platform and the discovery."""
    listener = MagicMock(services={})
    browser = MagicMock(zc={})
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.CastListener",
        return_value=listener,
    ) as cast_listener, patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=browser,
    ) as start_discovery:
        add_entities = await async_setup_cast(hass, config)
        await hass.async_block_till_done()
        await hass.async_block_till_done()
        assert start_discovery.call_count == 1
        # First positional arg of CastListener is the add callback.
        discovery_callback = cast_listener.call_args[0][0]
    def discover_chromecast(service_name: str, info: ChromecastInfo) -> None:
        """Discover a chromecast device."""
        # Register the fake zeroconf service, then fire the callback the
        # way the real pychromecast listener would.
        listener.services[info.uuid] = (
            {service_name},
            info.uuid,
            info.model_name,
            info.friendly_name,
        )
        discovery_callback(info.uuid, service_name)
    return discover_chromecast, add_entities
async def async_setup_media_player_cast(hass: HomeAssistantType, info: ChromecastInfo):
    """Set up the cast platform with async_setup_component."""
    listener = MagicMock(services={})
    browser = MagicMock(zc={})
    chromecast = get_fake_chromecast(info)
    zconf = get_fake_zconf(host=info.host, port=info.port)
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.get_chromecast_from_service",
        return_value=chromecast,
    ) as get_chromecast, patch(
        "homeassistant.components.cast.discovery.pychromecast.CastListener",
        return_value=listener,
    ) as cast_listener, patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=browser,
    ), patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf,
    ):
        # Configure the platform for exactly this device's UUID.
        await async_setup_component(
            hass, "cast", {"cast": {"media_player": {"uuid": info.uuid}}}
        )
        await hass.async_block_till_done()
        # Simulate the device being discovered via zeroconf.
        discovery_callback = cast_listener.call_args[0][0]
        service_name = "the-service"
        listener.services[info.uuid] = (
            {service_name},
            info.uuid,
            info.model_name,
            info.friendly_name,
        )
        discovery_callback(info.uuid, service_name)
        await hass.async_block_till_done()
        await hass.async_block_till_done()
        assert get_chromecast.call_count == 1
        return chromecast
def get_status_callbacks(chromecast_mock, mz_mock=None):
    """Get registered status callbacks from the chromecast mock."""
    # Each listener was handed to the corresponding register_* call as the
    # first positional argument; pull them back out of the mock records.
    cast_listener = chromecast_mock.register_status_listener.call_args[0][0]
    cast_status_cb = cast_listener.new_cast_status
    conn_listener = chromecast_mock.register_connection_listener.call_args[0][0]
    conn_status_cb = conn_listener.new_connection_status
    media_controller = chromecast_mock.socket_client.media_controller
    media_listener = media_controller.register_status_listener.call_args[0][0]
    media_status_cb = media_listener.new_media_status
    if not mz_mock:
        return cast_status_cb, conn_status_cb, media_status_cb
    # Multizone listener is the second positional argument.
    mz_listener = mz_mock.register_listener.call_args[0][1]
    group_media_status_cb = mz_listener.multizone_new_media_status
    return cast_status_cb, conn_status_cb, media_status_cb, group_media_status_cb
async def test_start_discovery_called_once(hass):
    """Test pychromecast.start_discovery called exactly once."""
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=Mock(),
    ) as start_discovery:
        await async_setup_cast(hass)
        assert start_discovery.call_count == 1
        # Setting up the platform again must not start a second discovery.
        await async_setup_cast(hass)
        assert start_discovery.call_count == 1
async def test_stop_discovery_called_on_stop(hass):
    """Test pychromecast.stop_discovery called on shutdown."""
    browser = MagicMock(zc={})
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.start_discovery",
        return_value=browser,
    ) as start_discovery:
        # start_discovery should be called with empty config
        await async_setup_cast(hass, {})
        assert start_discovery.call_count == 1
    with patch(
        "homeassistant.components.cast.discovery.pychromecast.discovery.stop_discovery"
    ) as stop_discovery:
        # stop discovery should be called on shutdown
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
        # The same browser object handed out at start must be passed back.
        stop_discovery.assert_called_once_with(browser)
async def test_create_cast_device_without_uuid(hass):
    """Test create a cast device with no UUId does not create an entity."""
    # A discovery result without a UUID cannot be tracked, so no entity.
    chromecast_info = get_fake_chromecast_info(uuid=None)
    assert cast._async_create_cast_device(hass, chromecast_info) is None
async def test_create_cast_device_with_uuid(hass):
    """Test create cast devices with UUID creates entities."""
    added_casts = hass.data[cast.ADDED_CAST_DEVICES_KEY] = set()
    chromecast_info = get_fake_chromecast_info()
    # First discovery of this UUID must yield a device and register it.
    first_device = cast._async_create_cast_device(hass, chromecast_info)
    assert first_device is not None
    assert chromecast_info.uuid in added_casts
    # Sending second time should not create new entity
    second_device = cast._async_create_cast_device(hass, chromecast_info)
    assert second_device is None
async def test_replay_past_chromecasts(hass):
    """Test cast platform re-playing past chromecasts when adding new one."""
    cast_group1 = get_fake_chromecast_info(host="host1", port=8009, uuid=FakeUUID)
    cast_group2 = get_fake_chromecast_info(
        host="host2", port=8009, uuid=UUID("9462202c-e747-4af5-a66b-7dce0e1ebc09")
    )
    zconf_1 = get_fake_zconf(host="host1", port=8009)
    zconf_2 = get_fake_zconf(host="host2", port=8009)
    # Platform is configured to accept only FakeUUID.
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(
        hass, config={"uuid": FakeUUID}
    )
    # A cast with a different UUID must not be added...
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service2", cast_group2)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 0
    # ...while the configured one is.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service1", cast_group1)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
    # A second platform setup replays the previously seen chromecast.
    add_dev2 = Mock()
    await cast._async_setup_platform(hass, {"host": "host2"}, add_dev2)
    await hass.async_block_till_done()
    assert add_dev2.call_count == 1
async def test_manual_cast_chromecasts_uuid(hass):
    """Test only wanted casts are added for manual configuration."""
    cast_1 = get_fake_chromecast_info(host="host_1", uuid=FakeUUID)
    cast_2 = get_fake_chromecast_info(host="host_2", uuid=FakeUUID2)
    zconf_1 = get_fake_zconf(host="host_1")
    zconf_2 = get_fake_zconf(host="host_2")
    # Manual configuration of media player with host "configured_host"
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(
        hass, config={"uuid": FakeUUID}
    )
    # A cast with a non-matching UUID is ignored.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service2", cast_2)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 0
    # The cast with the configured UUID is added.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service1", cast_1)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
async def test_auto_cast_chromecasts(hass):
    """Test all discovered casts are added for default configuration."""
    cast_1 = get_fake_chromecast_info(host="some_host")
    cast_2 = get_fake_chromecast_info(host="other_host", uuid=FakeUUID2)
    zconf_1 = get_fake_zconf(host="some_host")
    zconf_2 = get_fake_zconf(host="other_host")
    # Manual configuration of media player with host "configured_host"
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(hass)
    # Without a UUID filter, every discovered cast becomes an entity.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service2", cast_2)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service1", cast_1)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 2
async def test_update_cast_chromecasts(hass):
    """Test discovery of same UUID twice only adds one cast."""
    # Same default UUID, different hosts: simulates a device changing IP.
    cast_1 = get_fake_chromecast_info(host="old_host")
    cast_2 = get_fake_chromecast_info(host="new_host")
    zconf_1 = get_fake_zconf(host="old_host")
    zconf_2 = get_fake_zconf(host="new_host")
    # Manual configuration of media player with host "configured_host"
    discover_cast, add_dev1 = await async_setup_cast_internal_discovery(hass)
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_1,
    ):
        discover_cast("service1", cast_1)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
    # Rediscovery with the same UUID must not create a second entity.
    with patch(
        "homeassistant.components.cast.discovery.ChromeCastZeroconf.get_zeroconf",
        return_value=zconf_2,
    ):
        discover_cast("service2", cast_2)
    await hass.async_block_till_done()
    await hass.async_block_till_done()  # having tasks that add jobs
    assert add_dev1.call_count == 1
async def test_entity_availability(hass: HomeAssistantType):
    """Test handling of connection status."""
    entity_id = "media_player.speaker"
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    # Before any connection event the entity is unavailable.
    state = hass.states.get(entity_id)
    assert state.state == "unavailable"
    # Connecting makes it available (state unknown until media status).
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "unknown"
    # Disconnecting makes it unavailable again.
    connection_status = MagicMock()
    connection_status.status = "DISCONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "unavailable"
async def test_entity_cast_status(hass: HomeAssistantType):
    """Test handling of cast status."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    cast_status_cb, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    # The entity is registered under the cast's UUID as its unique id.
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Volume and mute changes propagate from cast status to state attrs.
    cast_status = MagicMock()
    cast_status.volume_level = 0.5
    cast_status.volume_muted = False
    cast_status_cb(cast_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("volume_level") == 0.5
    assert not state.attributes.get("is_volume_muted")
    cast_status = MagicMock()
    cast_status.volume_level = 0.2
    cast_status.volume_muted = True
    cast_status_cb(cast_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("volume_level") == 0.2
    assert state.attributes.get("is_volume_muted")
async def test_entity_play_media(hass: HomeAssistantType):
    """Test playing media."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Play_media
    # Service call is forwarded to pychromecast's media controller.
    await common.async_play_media(hass, "audio", "best.mp3", entity_id)
    chromecast.media_controller.play_media.assert_called_once_with("best.mp3", "audio")
async def test_entity_play_media_cast(hass: HomeAssistantType, quick_play_mock):
    """Test playing media with cast special features."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Play_media - cast with app ID
    # "cast" media type takes a JSON payload; app_id starts that app.
    await common.async_play_media(hass, "cast", '{"app_id": "abc123"}', entity_id)
    chromecast.start_app.assert_called_once_with("abc123")
    # Play_media - cast with app name (quick play)
    await common.async_play_media(hass, "cast", '{"app_name": "youtube"}', entity_id)
    quick_play_mock.assert_called_once_with(ANY, "youtube", {})
async def test_entity_play_media_cast_invalid(hass, caplog, quick_play_mock):
    """Test playing media with invalid cast payloads (bad JSON, extra keys, unsupported app)."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    # Simulate the device connecting so the entity becomes available.
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # play_media - media_type cast with invalid JSON: logged and re-raised,
    # nothing is started on the device
    with pytest.raises(json.decoder.JSONDecodeError):
        await common.async_play_media(hass, "cast", '{"app_id": "abc123"', entity_id)
    assert "Invalid JSON in media_content_id" in caplog.text
    chromecast.start_app.assert_not_called()
    quick_play_mock.assert_not_called()
    # Play_media - media_type cast with extra keys: extras are ignored with a
    # warning, the app is still started
    await common.async_play_media(
        hass, "cast", '{"app_id": "abc123", "extra": "data"}', entity_id
    )
    assert "Extra keys dict_keys(['extra']) were ignored" in caplog.text
    chromecast.start_app.assert_called_once_with("abc123")
    quick_play_mock.assert_not_called()
    # Play_media - media_type cast with unsupported app: warning is logged
    quick_play_mock.side_effect = NotImplementedError()
    await common.async_play_media(hass, "cast", '{"app_name": "unknown"}', entity_id)
    quick_play_mock.assert_called_once_with(ANY, "unknown", {})
    assert "App unknown not supported" in caplog.text
async def test_entity_play_media_sign_URL(hass: HomeAssistantType):
    """Test that a relative media path is expanded to a signed external URL."""
    entity_id = "media_player.speaker"
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:8123"},
    )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, _ = get_status_callbacks(chromecast)
    # Simulate the device connecting so the entity becomes available.
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    # Play_media - the relative path must be rewritten to the external URL
    # with an auth signature appended.
    await common.async_play_media(hass, "audio", "/best.mp3", entity_id)
    chromecast.media_controller.play_media.assert_called_once_with(ANY, "audio")
    assert chromecast.media_controller.play_media.call_args[0][0].startswith(
        "http://example.com:8123/best.mp3?authSig="
    )
async def test_entity_media_content_type(hass: HomeAssistantType):
    """Test the media_content_type attribute for the various content flags."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb = get_status_callbacks(chromecast)
    # Simulate the device connecting so the entity becomes available.
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # No content flag set -> attribute is absent
    media_status = MagicMock(images=None)
    media_status.media_is_movie = False
    media_status.media_is_musictrack = False
    media_status.media_is_tvshow = False
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") is None
    # tvshow flag -> "tvshow"
    media_status.media_is_tvshow = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") == "tvshow"
    # musictrack flag -> "music"
    media_status.media_is_tvshow = False
    media_status.media_is_musictrack = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") == "music"
    # Both musictrack and movie set -> "movie" wins
    media_status.media_is_musictrack = True
    media_status.media_is_movie = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.attributes.get("media_content_type") == "movie"
async def test_entity_control(hass: HomeAssistantType):
    """Test various device and media controls."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb = get_status_callbacks(chromecast)
    # Simulate the device connecting so the entity becomes available.
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # Turn on - casts the Home Assistant splash screen image
    await common.async_turn_on(hass, entity_id)
    chromecast.play_media.assert_called_once_with(
        "https://www.home-assistant.io/images/cast/splash.png", ANY
    )
    # Reset so the turn-off assertion only counts the call made below.
    chromecast.quit_app.reset_mock()
    # Turn off
    await common.async_turn_off(hass, entity_id)
    chromecast.quit_app.assert_called_once_with()
    # Mute
    await common.async_mute_volume(hass, True, entity_id)
    chromecast.set_volume_muted.assert_called_once_with(True)
    # Volume
    await common.async_set_volume_level(hass, 0.33, entity_id)
    chromecast.set_volume.assert_called_once_with(0.33)
    # Media play
    await common.async_media_play(hass, entity_id)
    chromecast.media_controller.play.assert_called_once_with()
    # Media pause
    await common.async_media_pause(hass, entity_id)
    chromecast.media_controller.pause.assert_called_once_with()
    # Media previous - ignored while queue support is not reported
    await common.async_media_previous_track(hass, entity_id)
    chromecast.media_controller.queue_prev.assert_not_called()
    # Media next - ignored while queue support is not reported
    await common.async_media_next_track(hass, entity_id)
    chromecast.media_controller.queue_next.assert_not_called()
    # Media seek - ignored while seek support is not reported
    await common.async_media_seek(hass, 123, entity_id)
    chromecast.media_controller.seek.assert_not_called()
    # Enable support for queue and seek
    media_status = MagicMock(images=None)
    media_status.supports_queue_next = True
    media_status.supports_seek = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    # Media previous - now forwarded to the device
    await common.async_media_previous_track(hass, entity_id)
    chromecast.media_controller.queue_prev.assert_called_once_with()
    # Media next - now forwarded to the device
    await common.async_media_next_track(hass, entity_id)
    chromecast.media_controller.queue_next.assert_called_once_with()
    # Media seek - now forwarded to the device
    await common.async_media_seek(hass, 123, entity_id)
    chromecast.media_controller.seek.assert_called_once_with(123)
async def test_entity_media_states(hass: HomeAssistantType):
    """Test mapping of player/device status flags to entity states."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    _, conn_status_cb, media_status_cb = get_status_callbacks(chromecast)
    # Simulate the device connecting so the entity becomes available.
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    # player_is_playing -> "playing"
    media_status = MagicMock(images=None)
    media_status.player_is_playing = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "playing"
    # player_is_paused -> "paused"
    media_status.player_is_playing = False
    media_status.player_is_paused = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "paused"
    # player_is_idle -> "idle"
    media_status.player_is_paused = False
    media_status.player_is_idle = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "idle"
    # Device-level idle (chromecast.is_idle) -> "off"
    media_status.player_is_idle = False
    chromecast.is_idle = True
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "off"
    # No flags at all -> "unknown"
    chromecast.is_idle = False
    media_status_cb(media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "unknown"
async def test_group_media_states(hass, mz_mock):
    """Test media states are read from group if entity has no state."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    # mz_mock enables the multizone (group) callbacks.
    _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(
        chromecast, mz_mock
    )
    # Simulate the device connecting so the entity becomes available.
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    group_media_status = MagicMock(images=None)
    player_media_status = MagicMock(images=None)
    # Player has no state, group is playing -> Should report 'playing'
    group_media_status.player_is_playing = True
    group_media_status_cb(str(FakeGroupUUID), group_media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "playing"
    # Player is paused, group is playing -> Should report 'paused'
    player_media_status.player_is_playing = False
    player_media_status.player_is_paused = True
    media_status_cb(player_media_status)
    await hass.async_block_till_done()
    # NOTE(review): second block_till_done appears needed for the state to
    # settle after the player status update — confirm before removing.
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "paused"
    # Player is in unknown state, group is playing -> Should report 'playing'
    player_media_status.player_state = "UNKNOWN"
    media_status_cb(player_media_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "playing"
async def test_group_media_control(hass, mz_mock):
    """Test media controls are forwarded to the group if entity has no state."""
    entity_id = "media_player.speaker"
    reg = await hass.helpers.entity_registry.async_get_registry()
    info = get_fake_chromecast_info()
    full_info = attr.evolve(
        info, model_name="google home", friendly_name="Speaker", uuid=FakeUUID
    )
    chromecast = await async_setup_media_player_cast(hass, info)
    # mz_mock enables the multizone (group) callbacks.
    _, conn_status_cb, media_status_cb, group_media_status_cb = get_status_callbacks(
        chromecast, mz_mock
    )
    # Simulate the device connecting so the entity becomes available.
    connection_status = MagicMock()
    connection_status.status = "CONNECTED"
    conn_status_cb(connection_status)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.name == "Speaker"
    assert state.state == "unknown"
    assert entity_id == reg.async_get_entity_id("media_player", "cast", full_info.uuid)
    group_media_status = MagicMock(images=None)
    player_media_status = MagicMock(images=None)
    # Player has no state, group is playing -> Should forward calls to group
    group_media_status.player_is_playing = True
    group_media_status_cb(str(FakeGroupUUID), group_media_status)
    await common.async_media_play(hass, entity_id)
    grp_media = mz_mock.get_multizone_mediacontroller(str(FakeGroupUUID))
    assert grp_media.play.called
    assert not chromecast.media_controller.play.called
    # Player is paused, group is playing -> Should not forward
    player_media_status.player_is_playing = False
    player_media_status.player_is_paused = True
    media_status_cb(player_media_status)
    await common.async_media_pause(hass, entity_id)
    grp_media = mz_mock.get_multizone_mediacontroller(str(FakeGroupUUID))
    assert not grp_media.pause.called
    assert chromecast.media_controller.pause.called
    # Player is in unknown state, group is playing -> Should forward to group
    player_media_status.player_state = "UNKNOWN"
    media_status_cb(player_media_status)
    await common.async_media_stop(hass, entity_id)
    grp_media = mz_mock.get_multizone_mediacontroller(str(FakeGroupUUID))
    assert grp_media.stop.called
    assert not chromecast.media_controller.stop.called
    # Verify play_media is not forwarded - it always targets the device itself
    await common.async_play_media(hass, "music", "best.mp3", entity_id)
    assert not grp_media.play_media.called
    assert chromecast.media_controller.play_media.called
async def test_failed_cast_on_idle(hass, caplog):
    """Test that no warning is logged unless the player went idle with reason "ERROR"."""
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    # Not idle, even with idle_reason "ERROR" -> no warning
    media_status = MagicMock(images=None)
    media_status.player_is_idle = False
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media" not in caplog.text
    # Idle with a non-error reason -> no warning
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "Other"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media" not in caplog.text
    # Idle with reason "ERROR" -> warning is logged
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media http://example.com:8123/tts.mp3." in caplog.text
async def test_failed_cast_other_url(hass, caplog):
    """Test warning when casting a URL that matches no configured base URL fails."""
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass,
            tts.DOMAIN,
            {tts.DOMAIN: {"platform": "demo", "base_url": "http://example.local:8123"}},
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    # Cast URL (example.com) differs from tts base_url (example.local):
    # warning is logged without a hint about the URL's origin.
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert "Failed to cast media http://example.com:8123/tts.mp3." in caplog.text
async def test_failed_cast_internal_url(hass, caplog):
    """Test warning when casting from internal_url fails."""
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass, tts.DOMAIN, {tts.DOMAIN: {"platform": "demo"}}
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    # Cast URL matches internal_url: warning mentions internal_url.
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.local:8123/tts.mp3"
    media_status_cb(media_status)
    assert (
        "Failed to cast media http://example.local:8123/tts.mp3 from internal_url"
        in caplog.text
    )
async def test_failed_cast_external_url(hass, caplog):
    """Test warning when casting from external_url fails."""
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:8123"},
    )
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass,
            tts.DOMAIN,
            {tts.DOMAIN: {"platform": "demo", "base_url": "http://example.com:8123"}},
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    # Cast URL matches external_url: warning mentions external_url.
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.com:8123/tts.mp3"
    media_status_cb(media_status)
    assert (
        "Failed to cast media http://example.com:8123/tts.mp3 from external_url"
        in caplog.text
    )
async def test_failed_cast_tts_base_url(hass, caplog):
    """Test warning when casting from tts.base_url fails."""
    with assert_setup_component(1, tts.DOMAIN):
        assert await async_setup_component(
            hass,
            tts.DOMAIN,
            {tts.DOMAIN: {"platform": "demo", "base_url": "http://example.local:8123"}},
        )
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    _, _, media_status_cb = get_status_callbacks(chromecast)
    # Cast URL matches the tts base_url: warning mentions tts.base_url.
    media_status = MagicMock(images=None)
    media_status.player_is_idle = True
    media_status.idle_reason = "ERROR"
    media_status.content_id = "http://example.local:8123/tts.mp3"
    media_status_cb(media_status)
    assert (
        "Failed to cast media http://example.local:8123/tts.mp3 from tts.base_url"
        in caplog.text
    )
async def test_disconnect_on_stop(hass: HomeAssistantType):
    """Test cast device disconnects socket on stop."""
    info = get_fake_chromecast_info()
    chromecast = await async_setup_media_player_cast(hass, info)
    # Firing the stop event must trigger exactly one disconnect.
    hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
    await hass.async_block_till_done()
    assert chromecast.disconnect.call_count == 1
async def test_entry_setup_no_config(hass: HomeAssistantType):
    """Test setting up entry with no config."""
    await async_setup_component(hass, "cast", {})
    await hass.async_block_till_done()
    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
    ) as mock_setup:
        await cast.async_setup_entry(hass, MockConfigEntry(), None)
    # With no YAML config the platform is set up once with an empty config.
    assert len(mock_setup.mock_calls) == 1
    assert mock_setup.mock_calls[0][1][1] == {}
async def test_entry_setup_single_config(hass: HomeAssistantType):
    """Test setting up entry and having a single config option."""
    await async_setup_component(
        hass, "cast", {"cast": {"media_player": {"uuid": "bla"}}}
    )
    await hass.async_block_till_done()
    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
    ) as mock_setup:
        await cast.async_setup_entry(hass, MockConfigEntry(), None)
    # A single dict config results in one platform setup with that config.
    assert len(mock_setup.mock_calls) == 1
    assert mock_setup.mock_calls[0][1][1] == {"uuid": "bla"}
async def test_entry_setup_list_config(hass: HomeAssistantType):
    """Test setting up entry and having multiple config options."""
    await async_setup_component(
        hass, "cast", {"cast": {"media_player": [{"uuid": "bla"}, {"uuid": "blu"}]}}
    )
    await hass.async_block_till_done()
    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
    ) as mock_setup:
        await cast.async_setup_entry(hass, MockConfigEntry(), None)
    # A list config results in one platform setup per entry, in order.
    assert len(mock_setup.mock_calls) == 2
    assert mock_setup.mock_calls[0][1][1] == {"uuid": "bla"}
    assert mock_setup.mock_calls[1][1][1] == {"uuid": "blu"}
async def test_entry_setup_platform_not_ready(hass: HomeAssistantType):
    """Test failed setting up entry will raise PlatformNotReady."""
    await async_setup_component(
        hass, "cast", {"cast": {"media_player": {"uuid": "bla"}}}
    )
    await hass.async_block_till_done()
    # Any exception from platform setup must surface as PlatformNotReady
    # so Home Assistant retries the entry later.
    with patch(
        "homeassistant.components.cast.media_player._async_setup_platform",
        side_effect=Exception,
    ) as mock_setup:
        with pytest.raises(PlatformNotReady):
            await cast.async_setup_entry(hass, MockConfigEntry(), None)
    assert len(mock_setup.mock_calls) == 1
    assert mock_setup.mock_calls[0][1][1] == {"uuid": "bla"}
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Builds one graph branch per case; at runtime only the branch matching the
  randomly sampled selector receives the real input.

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the
    selector as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  branches = []
  for case in range(num_cases):
    # switch() forwards the real x only on the branch whose case matches
    # the sampled selector; the other branches receive a dead tensor.
    gated = control_flow_ops.switch(x, tf.equal(sel, case))[1]
    branches.append(func(gated, case))
  # merge() picks the single live branch.
  return control_flow_ops.merge(branches)[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
  """Distort the color of a Tensor image.

  Each color distortion is non-commutative, so the order of the color ops
  matters. Rather than randomly permuting them, one of several fixed
  orderings is selected per preprocessing thread via `color_ordering`.

  Args:
    image: 3-D Tensor containing single image in [0, 1].
    color_ordering: Python int, a type of distortion (valid values: 0-3).
    fast_mode: Avoids slower ops (random_hue and random_contrast)
    scope: Optional scope for name_scope.

  Returns:
    3-D Tensor color-distorted image on range [0, 1]

  Raises:
    ValueError: if color_ordering not in [0, 3] (slow mode only; fast mode
      treats any non-zero ordering the same way).
  """
  with tf.name_scope(scope, 'distort_color', [image]):
    def brightness(img):
      return tf.image.random_brightness(img, max_delta=32. / 255.)

    def saturation(img):
      return tf.image.random_saturation(img, lower=0.5, upper=1.5)

    def hue(img):
      return tf.image.random_hue(img, max_delta=0.2)

    def contrast(img):
      return tf.image.random_contrast(img, lower=0.5, upper=1.5)

    if fast_mode:
      # Fast mode only uses the two cheap ops, in one of two orders.
      ops = [brightness, saturation] if color_ordering == 0 else [saturation,
                                                                  brightness]
    else:
      orderings = {
          0: [brightness, saturation, hue, contrast],
          1: [saturation, brightness, contrast, hue],
          2: [contrast, hue, brightness, saturation],
          3: [hue, saturation, contrast, brightness],
      }
      if color_ordering not in orderings:
        raise ValueError('color_ordering must be in [0, 3]')
      ops = orderings[color_ordering]

    for op in ops:
      image = op(image)

    # The random_* ops do not necessarily clamp.
    return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Generates a randomly distorted crop of `image` based on `bbox`.

  See `tf.image.sample_distorted_bounding_box` for more documentation.

  Args:
    image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax]. If num_boxes is 0 the whole image is used.
    min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
      area of the image must contain at least this fraction of any bounding
      box supplied.
    aspect_ratio_range: An optional list of `floats`. The cropped area of the
      image must have an aspect ratio = width / height within this range.
    area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
    max_attempts: An optional `int`. Number of attempts at generating a
      cropped region of the specified constraints. After `max_attempts`
      failures, the entire image is returned.
    scope: Optional scope for name_scope.

  Returns:
    A tuple, a 3-D Tensor cropped_image and the distorted bbox
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
    # Sample a window that is a randomly distorted version of the annotated
    # bounding box, subject to the aspect-ratio, area and coverage
    # constraints; when no box is supplied the whole image acts as the box.
    begin, size, distorted_box = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    # Crop out the sampled window and return it with the chosen box.
    return tf.slice(image, begin, size), distorted_box
def preprocess_for_train(image,
                         height,
                         width,
                         bbox,
                         fast_mode=True,
                         scope=None,
                         add_image_summaries=True,
                         random_crop=True,
                         use_grayscale=False):
  """Distort one image for training a network.
  Distorting images provides a useful technique for augmenting the data
  set during training in order to make the network invariant to aspects
  of the image that do not effect the label.
  Additionally it would create image_summaries to display the different
  transformations applied to the image.
  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is largest positive representable number for
      int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged
      as [ymin, xmin, ymax, xmax].
    fast_mode: Optional boolean, if True avoids slower transformations (i.e.
      bi-cubic resizing, random_hue or random_contrast).
    scope: Optional scope for name_scope.
    add_image_summaries: Enable image summaries.
    random_crop: Enable random cropping of images during preprocessing for
      training.
    use_grayscale: Whether to convert the image from RGB to grayscale.
  Returns:
    3-D float Tensor of distorted image used for training with range [-1, 1].
  """
  with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
    if bbox is None:
      # No annotation supplied: treat the whole image as the bounding box.
      bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                         dtype=tf.float32,
                         shape=[1, 1, 4])
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Each bounding box has shape [1, num_boxes, box coords] and
    # the coordinates are ordered [ymin, xmin, ymax, xmax].
    image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                                  bbox)
    if add_image_summaries:
      tf.summary.image('image_with_bounding_boxes', image_with_box)
    if not random_crop:
      distorted_image = image
    else:
      distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
      # Restore the shape since the dynamic slice based upon the bbox_size loses
      # the third dimension.
      distorted_image.set_shape([None, None, 3])
      image_with_distorted_box = tf.image.draw_bounding_boxes(
          tf.expand_dims(image, 0), distorted_bbox)
      if add_image_summaries:
        tf.summary.image('images_with_distorted_bounding_box',
                         image_with_distorted_box)
    # This resizing operation may distort the images because the aspect
    # ratio is not respected. We select a resize method in a round robin
    # fashion based on the thread number.
    # Note that ResizeMethod contains 4 enumerated resizing methods.
    # We select only 1 case for fast_mode bilinear.
    num_resize_cases = 1 if fast_mode else 4
    distorted_image = apply_with_random_selector(
        distorted_image,
        lambda x, method: tf.image.resize_images(x, [height, width], method),
        num_cases=num_resize_cases)
    if add_image_summaries:
      tf.summary.image(('cropped_' if random_crop else '') + 'resized_image',
                       tf.expand_dims(distorted_image, 0))
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Randomly distort the colors. There are 1 or 4 ways to do it.
    num_distort_cases = 1 if fast_mode else 4
    distorted_image = apply_with_random_selector(
        distorted_image,
        lambda x, ordering: distort_color(x, ordering, fast_mode),
        num_cases=num_distort_cases)
    if use_grayscale:
      distorted_image = tf.image.rgb_to_grayscale(distorted_image)
    if add_image_summaries:
      tf.summary.image('final_distorted_image',
                       tf.expand_dims(distorted_image, 0))
    # Rescale from [0, 1] to [-1, 1].
    distorted_image = tf.subtract(distorted_image, 0.5)
    distorted_image = tf.multiply(distorted_image, 2.0)
    return distorted_image
def preprocess_for_eval(image,
                        height,
                        width,
                        central_fraction=0.875,
                        scope=None,
                        central_crop=True,
                        use_grayscale=False):
  """Prepare one image for evaluation.

  Optionally crops the central `central_fraction` of the input, resizes it to
  `height` x `width` with bilinear interpolation, and rescales the values
  from [0, 1] to [-1, 1].

  Args:
    image: 3-D Tensor of image. If dtype is not tf.float32 it is converted,
      assuming the input range is [0, MAX] for the integer dtype (see
      `tf.image.convert_image_dtype` for details).
    height: integer target height (resize is skipped when falsy).
    width: integer target width (resize is skipped when falsy).
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
    central_crop: Enable central cropping of images during preprocessing for
      evaluation.
    use_grayscale: Whether to convert the image from RGB to grayscale.

  Returns:
    3-D float Tensor of prepared image with range [-1, 1].
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    if use_grayscale:
      image = tf.image.rgb_to_grayscale(image)
    if central_crop and central_fraction:
      # Keep only the central region (87.5% of the area by default).
      image = tf.image.central_crop(image, central_fraction=central_fraction)
    if height and width:
      # resize_bilinear expects a batch, so add and then strip a batch axis.
      batched = tf.expand_dims(image, 0)
      batched = tf.image.resize_bilinear(batched, [height, width],
                                         align_corners=False)
      image = tf.squeeze(batched, [0])
    # Rescale from [0, 1] to [-1, 1].
    return tf.multiply(tf.subtract(image, 0.5), 2.0)
def preprocess_image(image,
                     height,
                     width,
                     is_training=False,
                     bbox=None,
                     fast_mode=True,
                     add_image_summaries=True,
                     crop_image=True,
                     use_grayscale=False):
  """Pre-process one image for training or evaluation.

  Dispatches to `preprocess_for_train` or `preprocess_for_eval` depending on
  `is_training`.

  Args:
    image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it would converted
      to tf.float32 assuming that the range is [0, MAX], where MAX is largest
      positive representable number for int(8/16/32) data type (see
      `tf.image.convert_image_dtype` for details).
    height: integer, image expected height.
    width: integer, image expected width.
    is_training: Boolean. If true it would transform an image for train,
      otherwise it would transform it for evaluation.
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax]. Only used by the training path.
    fast_mode: Optional boolean, if True avoids slower transformations.
    add_image_summaries: Enable image summaries (training path only).
    crop_image: Whether to enable cropping of images during preprocessing for
      both training and evaluation.
    use_grayscale: Whether to convert the image from RGB to grayscale.

  Returns:
    3-D float Tensor containing an appropriately scaled image

  Raises:
    ValueError: if user does not provide bounding box
  """
  if not is_training:
    return preprocess_for_eval(
        image,
        height,
        width,
        central_crop=crop_image,
        use_grayscale=use_grayscale)
  return preprocess_for_train(
      image,
      height,
      width,
      bbox,
      fast_mode,
      add_image_summaries=add_image_summaries,
      random_crop=crop_image,
      use_grayscale=use_grayscale)
| |
#!/usr/bin/python
import subprocess
import getpass
import os
import platform
import xml.etree.ElementTree as et
class GitFunctions:
    """Wrapper around git and Maven command-line calls used by release scripts.

    Settings are read from ``environment.txt`` (``KEY=VALUE`` lines, ``#``
    starts a comment); anything missing is asked for interactively.
    """

    PROJECT_HOME = None
    GIT_USER_NAME = None
    M2_HOME = os.environ.get("M2_HOME")
    GIT_PASSWORD = None
    VERSION_PROPERTY = None
    REPOSITORY_ID = None
    RELEASE_REPOSITORY_URL = None
    SNAPSHOT_REPOSITORY_URL = None

    def __init__(self):
        self.load_environment_var()
        self.get_project_home()

    def load_environment_var(self):
        """Load KEY=VALUE settings from environment.txt into the instance."""
        with open("environment.txt", "r") as f:
            for line in f.read().splitlines():
                # Skip comments and lines without a KEY=VALUE shape (a blank
                # line previously raised IndexError on the value lookup).
                if line.startswith("#") or "=" not in line:
                    continue
                # Split on the first '=' only so values may themselves
                # contain '=' (e.g. repository URLs with query strings).
                key, value = line.split("=", 1)
                # BUG FIX: was `key is "M2_HOME"` -- identity comparison
                # against a string literal is unreliable; compare by value.
                if key == "M2_HOME":
                    self.M2_HOME = value
                if "PROJECT_HOME" in key:
                    self.PROJECT_HOME = value
                if "GIT_USER_NAME" in key:
                    self.GIT_USER_NAME = value
                if "VERSION_PROPERTY" in key:
                    self.VERSION_PROPERTY = value
                if "REPOSITORY_ID" in key:
                    self.REPOSITORY_ID = value
                if "RELEASE_REPOSITORY_URL" in key:
                    self.RELEASE_REPOSITORY_URL = value
                if "SNAPSHOT_REPOSITORY_URL" in key:
                    self.SNAPSHOT_REPOSITORY_URL = value

    def checkout_branch(self, branch):
        """Check out `branch`; return True when git reports unpushed commits."""
        text = subprocess.check_output(["git", "-C", self.PROJECT_HOME, "checkout", branch]).decode("utf-8")
        print(text + "\n")
        # git mentions "git push" in its advice output when the local branch
        # is ahead of the remote.
        if "git push" in text:
            return True
        return False

    def checkout_new_branch(self, branch_prefix, from_branch):
        """Create (or switch to an existing) `<prefix>-<name>` branch."""
        branch_name = input("Please enter name of branch: " + branch_prefix + "-")
        complete_branch_name = branch_prefix + "-" + branch_name
        already_exists = subprocess.call(
            ["git", "-C", self.PROJECT_HOME, "checkout", "-b", complete_branch_name, from_branch])
        # git exits with 128 when the branch already exists; just switch to it.
        # BUG FIX: was `is 128` (identity check on an int literal), and the
        # fallback call passed a list with shell=True, which on POSIX runs
        # only "git" and ignores the remaining arguments.
        if already_exists == 128:
            success = subprocess.call(["git", "-C", self.PROJECT_HOME, "checkout", complete_branch_name])
            self.check_success(success, "Error at checkout of branch")
        return complete_branch_name

    def increase_branch_version(self, is_snapshot=True, version=None):
        """Set a new Maven version; return it, or None when the user declines.

        NOTE(review): when called with version=None and the user confirms,
        __call_increase_version__ receives None and will fail concatenating
        "-SNAPSHOT"; presumably a version prompt is missing here (compare
        increase_feature_branch_version) -- confirm intended behaviour.
        """
        if version is None:
            increase = input("Should the version be increased? [Y/N]: ")
            if increase.lower() == "y":
                return self.__call_increase_version__(version, is_snapshot)
        else:
            return self.__call_increase_version__(version, is_snapshot)

    def increase_feature_branch_version(self, is_snapshot=True):
        """Prompt for a feature-branch version and apply it via Maven."""
        version = input("New version of feature branch: ")
        # `input` never returns None, so the old `if version is None` prompt
        # was dead code; call through directly with the entered version.
        return self.__call_increase_version__(version, is_snapshot)

    def __call_increase_version__(self, version, is_snapshot):
        """Run `mvn versions:set` for `version` (suffixing -SNAPSHOT if asked)."""
        if is_snapshot:
            version = version + "-SNAPSHOT"
        mvn_path = self.norm_path(self.M2_HOME + "/bin/mvn")
        mvn_cmd = ''.join([mvn_path + " versions:set -f=", self.PROJECT_HOME,
                           " -DnewVersion=", version, " -DprocessAllModules=true -DgenerateBackupPoms=false"])
        print("executing maven command: " + mvn_cmd + "\n")
        success = subprocess.call(mvn_cmd, shell=True)
        self.check_success(success, "Error setting next maven version to " + version)
        if self.VERSION_PROPERTY is not None:
            self.replace_property_in_pom(self.VERSION_PROPERTY, version)
        return version

    def norm_path(self, path):
        """Normalize a path; quote it on Windows so spaces survive the shell."""
        normed_path = os.path.normpath(path)
        if platform.system() == "Windows":
            normed_path = "\"" + normed_path + "\""
        return normed_path

    def increase_branch_version_next_snapshot(self):
        """Bump to the next -SNAPSHOT version if confirmed; return the choice."""
        increase = input("Should the version be increased? [Y/N]: ")
        agreed = increase.lower() == "y"
        if agreed:
            mvn_path = self.norm_path(self.M2_HOME + "/bin/mvn")
            mvn_cmd = ''.join([mvn_path + " versions:set -f=", self.PROJECT_HOME,
                               " -DnextSnapshot=true -DprocessAllModules=true -DgenerateBackupPoms=false"])
            print("executing maven command: " + mvn_cmd + "\n")
            success = subprocess.call(mvn_cmd, shell=True)
            self.check_success(success, "Error setting next maven version!")
            if self.VERSION_PROPERTY is not None:
                project_version = self.get_project_version()
                self.replace_property_in_pom(self.VERSION_PROPERTY, project_version)
        return agreed

    def execute_maven_goal(self, maven_goal):
        """Run an arbitrary Maven goal; 'deploy' gets repository handling."""
        # BUG FIX: was `maven_goal is "deploy"` (identity, not equality).
        if maven_goal == "deploy":
            self.maven_deploy()
        else:
            mvn_path = self.norm_path(self.M2_HOME + "/bin/mvn")
            mvn_cmd = ''.join([mvn_path, " ", maven_goal, " -f=", self.PROJECT_HOME])
            print("executing maven command: " + mvn_cmd + "\n")
            success = subprocess.call(mvn_cmd, shell=True)
            self.check_success(success, "Error executing " + maven_goal + "!")

    def maven_deploy(self):
        """Deploy to the snapshot or release repository based on the version."""
        project_version = self.get_project_version()
        if "SNAPSHOT" in str(project_version):
            if self.SNAPSHOT_REPOSITORY_URL is None:
                repository_url = input("Please enter snapshot repository server url (e.g. "
                                       "http://localhost/artifactory/libs-snapshot-local): ")
            else:
                repository_url = self.SNAPSHOT_REPOSITORY_URL
        else:
            if self.RELEASE_REPOSITORY_URL is None:
                repository_url = input("Please enter repository server url (e.g. "
                                       "http://localhost/artifactory/libs-release-local): ")
            else:
                repository_url = self.RELEASE_REPOSITORY_URL
        if self.REPOSITORY_ID is None:
            self.REPOSITORY_ID = input("Please enter repository ID (e.g. Artifactory Server): ")
        mvn_path = self.norm_path(self.M2_HOME + "/bin/mvn")
        mvn_cmd = ''.join([mvn_path + " deploy -f=", self.PROJECT_HOME,
                           " -DaltDeploymentRepository=", self.REPOSITORY_ID, "::default::", repository_url])
        print("executing maven command: " + mvn_cmd + "\n")
        success = subprocess.call(mvn_cmd, shell=True)
        self.check_success(success, "Error executing deploy !")

    def commit_changes(self, message=None, file_pattern=None):
        """Commit local changes after confirmation; return whether committed."""
        entry = input("Should the changes be committed to local repository? [Y/N]: ")
        if entry.lower() == "y":
            if message is None:
                message = input("Please enter commit message: ")
            if file_pattern is None:
                success = subprocess.call(["git", "-C", self.PROJECT_HOME, "commit", "-a", "-m", message])
                self.check_success(success, "Error while committing to local repository, please check and try again")
                # BUG FIX: this path used a bare `return` (None), which reads
                # as falsy to callers even though the commit succeeded.
                return True
            success = subprocess.call(["git", "-C", self.PROJECT_HOME, "commit", "-m", message, self.PROJECT_HOME +
                                       "/" + file_pattern])
            self.check_success(success, "Error while committing to local repository, please check and try again")
        return entry.lower() == "y"

    def has_files_to_commit(self):
        """Return True when tracked files have uncommitted changes."""
        not_committed = subprocess.check_output(
            ["git", "-C", self.PROJECT_HOME, "status", "--porcelain", "--untracked-files=no"]).decode("utf-8")
        print(not_committed + "\n")
        # BUG FIX: was `is not ''` (identity); compare by value instead.
        return not_committed.strip() != ''

    def pull_branch(self, branch):
        """Pull `branch` from origin; return True when already up to date."""
        text = subprocess.check_output(["git", "-C", self.PROJECT_HOME, "pull", "origin", branch]).decode("utf-8")
        print(text + "\n")
        # Both spellings occur across git versions.
        if "Already up-to-date" in text or "Already up to date" in text:
            return True
        return False

    def get_remote_url(self):
        """Return the origin remote URL without trailing newline."""
        return subprocess.check_output(["git", "-C", self.PROJECT_HOME, "config", "--get", "remote.origin.url"]).decode(
            "utf-8").replace("\n", "")

    def get_username(self):
        """Return GIT_USER_NAME from config, falling back to git user.name."""
        if self.GIT_USER_NAME is None:
            return subprocess.check_output(["git", "-C", self.PROJECT_HOME, "config", "--get", "user.name"]).decode(
                "utf-8").replace("\n", "")
        return self.GIT_USER_NAME

    def push_branch(self, branch):
        """Push `branch` after confirmation; abort the script otherwise."""
        entry = input("Should all commits of " + branch + " be pushed to GitHub? [Y/N]: ")
        if entry.lower() == "y":
            remote_url = self.get_remote_url()
            username = self.get_username()
            if self.GIT_PASSWORD is None:
                self.GIT_PASSWORD = getpass.getpass("Please enter password for " + remote_url + ": ")
            # Inject credentials into the remote URL for a non-interactive push.
            remote_url = remote_url.replace("https://github.com/",
                                            "https://" + username + ":" + self.GIT_PASSWORD + "@github.com/")
            # BUG FIX: the devnull handle was never closed; use a context
            # manager so it is released even when the push fails.
            with open(os.devnull, 'w') as devnull:
                success = subprocess.call(["git", "-C", self.PROJECT_HOME, "push", remote_url, branch],
                                          stdout=devnull, stderr=devnull)
            # BUG FIX: was `is not 0` (identity check on an int literal).
            if success != 0:
                self.GIT_PASSWORD = None
                exit("Error while pushing to GitHub. Please check username in Git config.name and password")
            print("Pushing successful")
            return True
        exit("Please Push to branch in order to continue!")

    def get_project_home(self):
        """Prompt for the project directory when not configured."""
        if self.PROJECT_HOME is None:
            self.PROJECT_HOME = input("Please enter the project directory: ")

    def show_branch_state(self, branch_prefix):
        """Print the state of all branches matching `<prefix>-*`."""
        subprocess.call(["git", "-C", self.PROJECT_HOME, "show-branch", "--list", branch_prefix + "-*"])

    def merge_branch(self, branch_from):
        """Merge `branch_from` into the current branch; abort on conflict."""
        merge_result = subprocess.call(["git", "-C", self.PROJECT_HOME, "merge", branch_from])
        self.check_success(merge_result, "Please resolve conflict before continue")

    def merge_branch_no_ff(self, branch_from):
        """Merge `branch_from` with --no-ff (always create a merge commit)."""
        merge_result = subprocess.call(["git", "-C", self.PROJECT_HOME, "merge", "--no-ff", branch_from])
        self.check_success(merge_result, "Please resolve conflict before continue")

    def delete_branch_locally(self, branch):
        """Delete a fully-merged local branch."""
        delete_result = subprocess.call(["git", "-C", self.PROJECT_HOME, "branch", "-d", branch])
        self.check_success(delete_result, "An error occured while deleting the branch")

    def get_current_branch_name(self):
        """Return the currently checked-out branch name, or None."""
        branch_list = subprocess.check_output(["git", "-C", self.PROJECT_HOME, "branch", "--list"]).decode("utf-8")
        for name in branch_list.splitlines():
            # The active branch is marked with a leading "* ".
            if "* " in name:
                return name.replace("* ", "")
        return None

    def create_release_tag(self, release_version):
        """Create an annotated `v<version>` tag for the release."""
        subprocess.call(["git", "-C", self.PROJECT_HOME, "tag", "-a", "v" + release_version,
                         "-m", "Creating Tag for Release v" + release_version])

    def reset_commits(self, number_of_commits=1):
        """Soft-reset the last `number_of_commits` commits (keep the changes)."""
        success = subprocess.call(["git", "-C", self.PROJECT_HOME, "reset", "--soft", "HEAD~" + str(number_of_commits)])
        self.check_success(success, "Error reset last commit")

    def get_clean_branch_state(self, branch):
        """Check out, commit, pull and push `branch` until it is clean."""
        print("-- Step 1: Change local repository to " + branch + " --")
        ahead = self.checkout_branch(branch)
        print("-- Step 2: Show Status of " + branch + " --")
        has_commits = self.has_files_to_commit()
        if has_commits is True:
            self.commit_changes()
        print("-- Step 3: Pull current version of " + branch + " from GitHub --")
        up_to_date = self.pull_branch(branch)
        if not up_to_date:
            exit("Please check pulled changes and reexecute this script again")
        if ahead is True or has_commits is True:
            self.push_branch(branch)

    def get_project_version(self):
        """Return the <version> text of the project's root pom.xml."""
        return et.parse(self.PROJECT_HOME + "/pom.xml").find('{http://maven.apache.org/POM/4.0.0}version').text

    def replace_property_in_pom(self, tag_name, new_value):
        """Set `tag_name` to `new_value` in pom.xml, or abort when missing."""
        tree = et.parse(self.PROJECT_HOME + "/pom.xml")
        tag = tree.find(".//{http://maven.apache.org/POM/4.0.0}" + tag_name)
        if tag is None:
            exit("No Tag found in POM : " + tag_name)
        tag.text = new_value
        tree.write(self.PROJECT_HOME + "/pom.xml",
                   default_namespace='http://maven.apache.org/POM/4.0.0')
        print("\n Tag " + tag_name + " set to " + new_value)

    @staticmethod
    def check_success(exit_code, error_msg):
        """Abort the script with `error_msg` when `exit_code` is non-zero."""
        # BUG FIX: was `is not 0` (identity check on an int literal).
        if exit_code != 0:
            exit(error_msg)
| |
# Copyright 2019 NTT DATA.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_versionedobjects import base as ovoo_base
from sqlalchemy.orm import joinedload
from tacker.common import exceptions
from tacker.common import utils
from tacker.db import api as db_api
from tacker.db.db_sqlalchemy import api
from tacker.db.db_sqlalchemy import models
from tacker.objects import base
from tacker.objects import fields
VNF_SOFTWARE_IMAGE_OPTIONAL_ATTRS = ['metadata']
LOG = logging.getLogger(__name__)
def _metadata_add_to_db(context, id, metadata, max_retries=10):
    # Bulk-insert the key/value metadata of a software image and return the
    # metadata dict unchanged.
    # NOTE(review): the retry loop is vestigial -- the body unconditionally
    # returns on the first iteration (`attempt`/`max_retries` are never used
    # again), presumably because the duplicate-entry retry handling was
    # removed; confirm before relying on any retry behaviour here.
    for attempt in range(max_retries):
        with db_api.context_manager.writer.using(context):
            new_entries = []
            for key, value in metadata.items():
                new_entries.append({"key": key,
                                    "value": value,
                                    "image_uuid": id})
            if new_entries:
                # Single multi-row INSERT instead of one statement per key.
                context.session.execute(
                    models.VnfSoftwareImageMetadata.__table__.insert(None),
                    new_entries)
            return metadata
@db_api.context_manager.writer
def _vnf_sw_image_create(context, values, metadata=None):
    # Persist a new VnfSoftwareImage row (plus optional metadata rows) and
    # return the model with its `_metadata` relationship populated.
    vnf_sw_image = models.VnfSoftwareImage()
    vnf_sw_image.update(values)
    vnf_sw_image.save(context.session)
    vnf_sw_image._metadata = []
    if metadata:
        _metadata_add_to_db(context, vnf_sw_image.id, metadata)
        # Expire the stale empty list so the next access re-reads from DB.
        context.session.expire(vnf_sw_image, ['_metadata'])
        # Bare attribute access intentionally triggers the lazy load while
        # the session is still open.
        vnf_sw_image._metadata
    return vnf_sw_image
@db_api.context_manager.reader
def _vnf_sw_image_get_by_id(context, id):
    # Fetch one non-deleted VnfSoftwareImage row by primary key; eager-load
    # its metadata rows in the same query so `_metadata` is usable after the
    # session closes.
    query = api.model_query(context, models.VnfSoftwareImage,
        read_deleted="no").filter_by(id=id).options(joinedload('_metadata'))
    result = query.first()
    if not result:
        raise exceptions.VnfSoftwareImageNotFound(id=id)
    return result
@base.TackerObjectRegistry.register
class VnfSoftwareImage(base.TackerObject, base.TackerPersistentObject):
    """Versioned object wrapping one VnfSoftwareImage database row."""

    # Maps external API attribute paths to (object_field, type, model_info)
    # tuples, used when filtering API responses by attribute selector.
    ALL_ATTRIBUTES = {
        "softwareImages": {
            'id': ('software_image_id', 'uuid', 'VnfSoftwareImage'),
            'imagePath': ('image_path', 'string', 'VnfSoftwareImage'),
            'diskFormat': ('disk_format', 'string', 'VnfSoftwareImage'),
            'userMetadata/*': ('metadata', 'key_value_pair',
                {"key_column": "key", "value_column": "value",
                 "model": "VnfSoftwareImageMetadata"}),
            'size': ('size', 'number', 'VnfSoftwareImage'),
            'createdAt': ('created_at', 'datetime', 'VnfSoftwareImage'),
            'name': ('name', 'string', 'VnfSoftwareImage'),
            'minDisk': ('min_disk', 'number', 'VnfSoftwareImage'),
            'version': ('version', 'string', 'VnfSoftwareImage'),
            'provider': ('provider', 'string', 'VnfSoftwareImage'),
            'minRam': ('min_ram', 'number', 'VnfSoftwareImage'),
            'containerFormat': ('container_format', 'string',
                                'VnfSoftwareImage'),
            "checksum": {
                'hash': ('hash', 'string', 'VnfSoftwareImage'),
                'algorithm': ('algorithm', 'string', 'VnfSoftwareImage')
            }
        }
    }
    # Flattened view keyed by 'softwareImages/...'-style paths.
    FLATTEN_ATTRIBUTES = utils.flatten_dict(ALL_ATTRIBUTES.copy())
    SIMPLE_ATTRIBUTES = ['id', 'imagePath', 'diskFormat', 'size',
        'createdAt', 'name', 'minDisk', 'version', 'provider', 'minRam',
        'containerFormat']
    COMPLEX_ATTRIBUTES = ['softwareImages', 'softwareImages/userMetadata',
        'softwareImages/checksum']

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(nullable=False),
        'software_image_id': fields.StringField(nullable=False),
        'flavour_uuid': fields.UUIDField(nullable=False),
        'name': fields.StringField(nullable=True),
        'provider': fields.StringField(nullable=True),
        'version': fields.StringField(nullable=True),
        'algorithm': fields.StringField(nullable=True),
        'hash': fields.StringField(nullable=True),
        'container_format': fields.StringField(nullable=True),
        'disk_format': fields.StringField(nullable=True),
        'min_disk': fields.IntegerField(),
        'min_ram': fields.IntegerField(default=0),
        'size': fields.IntegerField(),
        'image_path': fields.StringField(),
        'metadata': fields.DictOfStringsField(nullable=True)
    }

    @staticmethod
    def _from_db_object(context, vnf_sw_image, db_sw_image,
                        expected_attrs=None):
        # Copy DB columns onto the object; optional attrs ('metadata') are
        # only populated when requested via expected_attrs.
        vnf_sw_image._context = context
        for key in vnf_sw_image.fields:
            if key in VNF_SOFTWARE_IMAGE_OPTIONAL_ATTRS:
                continue
            else:
                db_key = key
            setattr(vnf_sw_image, key, db_sw_image[db_key])
        vnf_sw_image._extra_attributes_from_db_object(vnf_sw_image,
            db_sw_image, expected_attrs)
        vnf_sw_image.obj_reset_changes()
        return vnf_sw_image

    @staticmethod
    def _extra_attributes_from_db_object(vnf_sw_image, db_sw_image,
                                         expected_attrs=None):
        """Method to help with migration of extra attributes to objects.
        """
        if expected_attrs is None:
            expected_attrs = []
        if 'metadata' in expected_attrs:
            # 'metadetails' is the DB-side name for the metadata mapping.
            setattr(vnf_sw_image, 'metadata', db_sw_image['metadetails'])

    def obj_load_attr(self, attrname):
        # Lazy-load `attrname` from the database on first access.
        if not self._context:
            raise exceptions.OrphanedObjectError(method='obj_load_attr',
                objtype=self.obj_name())
        if 'id' not in self:
            # NOTE(review): `_` (i18n translate) is not imported in this
            # module as shown -- this error path looks like a latent
            # NameError; confirm the i18n import exists at file top.
            raise exceptions.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'id': self.id,
                   })
        self._obj_load_attr(attrname)

    def _obj_load_attr(self, attrname):
        """Internal method for loading attributes from vnf flavour."""
        if attrname in self.fields and attrname != 'id':
            self._load_generic(attrname)
        else:
            # NOTE(nirajsingh): Raise error if non existing field is
            # requested.
            raise exceptions.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        self.obj_reset_changes([attrname])

    def _load_generic(self, attrname):
        # Re-fetch the whole object and copy over any fields we don't have.
        software_image = self.__class__.get_by_id(self._context,
                                                  id=self.id,
                                                  expected_attrs=attrname)
        if attrname not in software_image:
            raise exceptions.ObjectActionError(
                action='obj_load_attr',
                reason=_('loading %s requires recursion') % attrname)
        for field in self.fields:
            if field in software_image and field not in self:
                setattr(self, field, getattr(software_image, field))

    @base.remotable
    def create(self):
        # Persist this object; generates a UUID when none was supplied and
        # stores 'metadata' in its own table.
        if self.obj_attr_is_set('id'):
            raise exceptions.ObjectActionError(action='create',
                                               reason=_('already created'))
        updates = self.obj_get_changes()
        if 'id' not in updates:
            updates['id'] = uuidutils.generate_uuid()
            self.id = updates['id']
        metadata = updates.pop('metadata', None)
        db_sw_image = _vnf_sw_image_create(self._context, updates,
                                           metadata=metadata)
        self._from_db_object(self._context, self, db_sw_image)

    @base.remotable_classmethod
    def get_by_id(cls, context, id, expected_attrs=None):
        # Load one VnfSoftwareImage by primary key.
        db_sw_image = _vnf_sw_image_get_by_id(context, id)
        return cls._from_db_object(context, cls(), db_sw_image,
                                   expected_attrs=expected_attrs)

    def _get_user_metadata(self, include_fields=None):
        # Need special handling for field containing key-value pair.
        # If user requests softwareImages/userMetadata/key1 and if
        # softwareImages/userMetadata contains key1=value1, key2=value2,
        # it should return only keys that are requested in include_fields.
        # If user requests only softwareImages/userMetadata, then in that
        # case, it should return all key/value pairs. If any of the requested
        # key is not present, then it will siliently ignore it.
        key = 'softwareImages/userMetadata'
        if key in include_fields or '%s/*' % key in \
                include_fields:
            return self.metadata
        else:
            # Check if user has requested specified keys from
            # softwareImages/userMetadata.
            key_list = []
            special_key = '%s/' % key
            for field in include_fields:
                if field.startswith(special_key):
                    key_list.append(field[len(special_key):])
            data_resp = dict()
            for key_req in key_list:
                if key_req in self.metadata:
                    data_resp[key_req] = self.metadata[key_req]
            if len(key_list) > 0:
                return data_resp

    def to_dict(self, include_fields=None):
        # Build the API representation, restricted to `include_fields`.
        response = dict()
        fields = ['softwareImages/%s' % attribute for attribute in
                  self.SIMPLE_ATTRIBUTES]
        to_fields = set(fields).intersection(include_fields)
        for field in to_fields:
            display_field = field.split("/")[-1]
            response[display_field] = getattr(self,
                self.FLATTEN_ATTRIBUTES[field][0])
        # add checksum
        to_fields = set([key for key in self.FLATTEN_ATTRIBUTES.keys()
                         if key.startswith('softwareImages/checksum')])
        checksum = dict()
        to_fields = to_fields.intersection(include_fields)
        for field in to_fields:
            display_field = field.split("/")[-1]
            checksum[display_field] = getattr(self,
                self.FLATTEN_ATTRIBUTES[field][0])
        if checksum:
            response.update({"checksum": checksum})
        user_metadata = self._get_user_metadata(include_fields)
        if user_metadata is not None:
            response.update({"userMetadata": user_metadata})
        return response
@base.TackerObjectRegistry.register
class VnfSoftwareImagesList(ovoo_base.ObjectListBase, base.TackerObject):
    """Versioned-object list container for VnfSoftwareImage entries."""

    VERSION = '1.0'

    fields = {
        'objects': fields.ListOfObjectsField('VnfSoftwareImage')
    }
| |
"""
This module implements multioutput regression and classification.
The estimators provided in this module are meta-estimators: they require
a base estimator to be provided in their constructor. The meta-estimator
extends single output estimators to multioutput estimators.
"""
# Author: Tim Head <betatim@gmail.com>
# Author: Hugo Bowne-Anderson <hugobowne@gmail.com>
# Author: Chris Rivera <chris.richard.rivera@gmail.com>
# Author: Michael Williamson
# Author: James Ashton Nichols <james.ashton.nichols@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from joblib import Parallel
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator, clone, MetaEstimatorMixin
from .base import RegressorMixin, ClassifierMixin, is_classifier
from .model_selection import cross_val_predict
from .utils.metaestimators import available_if
from .utils import check_random_state
from .utils.validation import check_is_fitted, has_fit_parameter, _check_fit_params
from .utils.multiclass import check_classification_targets
from .utils.fixes import delayed
__all__ = [
"MultiOutputRegressor",
"MultiOutputClassifier",
"ClassifierChain",
"RegressorChain",
]
def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params):
    """Clone ``estimator`` and fit the fresh copy on ``(X, y)``.

    ``sample_weight`` is forwarded only when given, so estimators that do not
    accept it are still usable when no weights are supplied.
    """
    fitted = clone(estimator)
    if sample_weight is None:
        fitted.fit(X, y, **fit_params)
    else:
        fitted.fit(X, y, sample_weight=sample_weight, **fit_params)
    return fitted
def _partial_fit_estimator(
estimator, X, y, classes=None, sample_weight=None, first_time=True
):
if first_time:
estimator = clone(estimator)
if sample_weight is not None:
if classes is not None:
estimator.partial_fit(X, y, classes=classes, sample_weight=sample_weight)
else:
estimator.partial_fit(X, y, sample_weight=sample_weight)
else:
if classes is not None:
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
return estimator
def _available_if_estimator_has(attr):
    """Return an `available_if` descriptor checking for ``attr``.

    The method is exposed when either the template ``estimator`` has ``attr``
    or every fitted estimator in ``estimators_`` does. Helper for Chain
    implementations.
    """

    def _check(self):
        if hasattr(self.estimator, attr):
            return True
        return all(hasattr(est, attr) for est in self.estimators_)

    return available_if(_check)
class _MultiOutputEstimator(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
    """Shared machinery for multi-output meta-estimators.

    Subclasses fit one clone of ``estimator`` per output column of ``y``,
    optionally in parallel via joblib.
    """

    @abstractmethod
    def __init__(self, estimator, *, n_jobs=None):
        self.estimator = estimator
        self.n_jobs = n_jobs

    @_available_if_estimator_has("partial_fit")
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incrementally fit the model to data.

        Fit a separate model for each output variable.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.

        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets.

        classes : list of ndarray of shape (n_outputs,)
            Each array is unique classes for one output in str/int.
            Can be obtained via
            ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where y is the
            target matrix of the entire dataset.
            This argument is required for the first call to partial_fit
            and can be omitted in the subsequent calls.
            Note that y doesn't need to contain all labels in `classes`.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.

        Returns
        -------
        self : object
        """
        y = self._validate_data(X="no_validation", y=y, multi_output=True)

        if y.ndim == 1:
            raise ValueError(
                "y must have at least two dimensions for "
                "multi-output regression but has only one."
            )

        if sample_weight is not None and not has_fit_parameter(
            self.estimator, "sample_weight"
        ):
            raise ValueError("Underlying estimator does not support sample weights.")

        # FIX: this flag used to be computed twice (before and after
        # validation); nothing in between can create `estimators_`, so a
        # single computation suffices.
        first_time = not hasattr(self, "estimators_")

        # First call: clone the template per output; later calls keep feeding
        # the already-created per-output estimators.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_partial_fit_estimator)(
                self.estimators_[i] if not first_time else self.estimator,
                X,
                y[:, i],
                classes[i] if classes is not None else None,
                sample_weight,
                first_time,
            )
            for i in range(y.shape[1])
        )

        if first_time and hasattr(self.estimators_[0], "n_features_in_"):
            self.n_features_in_ = self.estimators_[0].n_features_in_

        return self

    def fit(self, X, y, sample_weight=None, **fit_params):
        """Fit the model to data.

        Fit a separate model for each output variable.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.

        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets. An indicator matrix turns on multilabel
            estimation.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.

        **fit_params : dict of string -> object
            Parameters passed to the ``estimator.fit`` method of each step.

            .. versionadded:: 0.23

        Returns
        -------
        self : object
        """
        if not hasattr(self.estimator, "fit"):
            raise ValueError("The base estimator should implement a fit method")

        y = self._validate_data(X="no_validation", y=y, multi_output=True)

        if is_classifier(self):
            check_classification_targets(y)

        if y.ndim == 1:
            raise ValueError(
                "y must have at least two dimensions for "
                "multi-output regression but has only one."
            )

        if sample_weight is not None and not has_fit_parameter(
            self.estimator, "sample_weight"
        ):
            raise ValueError("Underlying estimator does not support sample weights.")

        fit_params_validated = _check_fit_params(X, fit_params)

        # One independent clone fitted per output column, possibly in parallel.
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_estimator)(
                self.estimator, X, y[:, i], sample_weight, **fit_params_validated
            )
            for i in range(y.shape[1])
        )

        if hasattr(self.estimators_[0], "n_features_in_"):
            self.n_features_in_ = self.estimators_[0].n_features_in_

        return self

    def predict(self, X):
        """Predict multi-output variable using a model
        trained for each target variable.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.

        Returns
        -------
        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets predicted across multiple predictors.
            Note: Separate models are generated for each predictor.
        """
        check_is_fitted(self)
        if not hasattr(self.estimators_[0], "predict"):
            raise ValueError("The base estimator should implement a predict method")

        y = Parallel(n_jobs=self.n_jobs)(
            delayed(e.predict)(X) for e in self.estimators_
        )

        # Stack the per-output 1-D predictions into (n_samples, n_outputs).
        return np.asarray(y).T

    def _more_tags(self):
        return {"multioutput_only": True}
class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator):
    """Multi target regression.

    This strategy consists of fitting one regressor per target. This is a
    simple strategy for extending regressors that do not natively support
    multi-target regression.

    .. versionadded:: 0.18

    Parameters
    ----------
    estimator : estimator object
        An estimator object implementing :term:`fit` and :term:`predict`.

    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel.
        :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported
        by the passed estimator) will be parallelized for each target.
        When individual estimators are fast to train or predict,
        using ``n_jobs > 1`` can result in slower performance due
        to the parallelism overhead.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all available processes / threads.
        See :term:`Glossary <n_jobs>` for more details.

        .. versionchanged:: 0.20
            `n_jobs` default changed from 1 to None

    Attributes
    ----------
    estimators_ : list of ``n_output`` estimators
        Estimators used for predictions.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying `estimator` exposes such an attribute when fit.

        .. versionadded:: 0.24

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import load_linnerud
    >>> from sklearn.multioutput import MultiOutputRegressor
    >>> from sklearn.linear_model import Ridge
    >>> X, y = load_linnerud(return_X_y=True)
    >>> clf = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y)
    >>> clf.predict(X[[0]])
    array([[176..., 35..., 57...]])
    """

    def __init__(self, estimator, *, n_jobs=None):
        super().__init__(estimator, n_jobs=n_jobs)

    @_available_if_estimator_has("partial_fit")
    def partial_fit(self, X, y, sample_weight=None):
        """Incrementally fit the model to data.

        Fit a separate model for each output variable.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Data.

        y : {array-like, sparse matrix} of shape (n_samples, n_outputs)
            Multi-output targets.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Only supported if the underlying regressor supports sample
            weights.

        Returns
        -------
        self : object
        """
        # BUG FIX: the parent's return value (``self``) was previously
        # discarded, so this method returned ``None`` despite documenting
        # ``self`` -- breaking call chaining.
        return super().partial_fit(X, y, sample_weight=sample_weight)
class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator):
"""Multi target classification
This strategy consists of fitting one classifier per target. This is a
simple strategy for extending classifiers that do not natively support
multi-target classification
Parameters
----------
estimator : estimator object
An estimator object implementing :term:`fit`, :term:`score` and
:term:`predict_proba`.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel.
:meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported
by the passed estimator) will be parallelized for each target.
When individual estimators are fast to train or predict,
using ``n_jobs > 1`` can result in slower performance due
to the parallelism overhead.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all available processes / threads.
See :term:`Glossary <n_jobs>` for more details.
.. versionchanged:: 0.20
`n_jobs` default changed from 1 to None
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels.
estimators_ : list of ``n_output`` estimators
Estimators used for predictions.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying `estimator` exposes such an attribute when fit.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> from sklearn.neighbors import KNeighborsClassifier
>>> X, y = make_multilabel_classification(n_classes=3, random_state=0)
>>> clf = MultiOutputClassifier(KNeighborsClassifier()).fit(X, y)
>>> clf.predict(X[-2:])
array([[1, 1, 0], [1, 1, 1]])
"""
    def __init__(self, estimator, *, n_jobs=None):
        # One clone of `estimator` is fitted per output column; the shared
        # fit/predict machinery lives in _MultiOutputEstimator.
        super().__init__(estimator, n_jobs=n_jobs)
def fit(self, X, Y, sample_weight=None, **fit_params):
"""Fit the model to data matrix X and targets Y.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Y : array-like of shape (n_samples, n_classes)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Only supported if the underlying classifier supports sample
weights.
**fit_params : dict of string -> object
Parameters passed to the ``estimator.fit`` method of each step.
.. versionadded:: 0.23
Returns
-------
self : object
"""
super().fit(X, Y, sample_weight, **fit_params)
self.classes_ = [estimator.classes_ for estimator in self.estimators_]
return self
def _check_predict_proba(self):
if hasattr(self, "estimators_"):
# raise an AttributeError if `predict_proba` does not exist for
# each estimator
[getattr(est, "predict_proba") for est in self.estimators_]
return True
# raise an AttributeError if `predict_proba` does not exist for the
# unfitted estimator
getattr(self.estimator, "predict_proba")
return True
@available_if(_check_predict_proba)
def predict_proba(self, X):
"""Probability estimates.
Returns prediction probabilities for each class of each output.
This method will raise a ``ValueError`` if any of the
estimators do not have ``predict_proba``.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data
Returns
-------
p : array of shape (n_samples, n_classes), or a list of n_outputs \
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
.. versionchanged:: 0.19
This function now returns a list of arrays where the length of
the list is ``n_outputs``, and each array is (``n_samples``,
``n_classes``) for that particular output.
"""
check_is_fitted(self)
results = [estimator.predict_proba(X) for estimator in self.estimators_]
return results
def score(self, X, y):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples
y : array-like of shape (n_samples, n_outputs)
True values for X
Returns
-------
scores : float
accuracy_score of self.predict(X) versus y
"""
check_is_fitted(self)
n_outputs_ = len(self.estimators_)
if y.ndim == 1:
raise ValueError(
"y must have at least two dimensions for "
"multi target classification but has only one"
)
if y.shape[1] != n_outputs_:
raise ValueError(
"The number of outputs of Y for fit {0} and"
" score {1} should be same".format(n_outputs_, y.shape[1])
)
y_pred = self.predict(X)
return np.mean(np.all(y == y_pred, axis=1))
def _more_tags(self):
# FIXME
return {"_skip_test": True}
def _available_if_base_estimator_has(attr):
"""Returns a function to check if base_estimator or estimators_ has attr
Helper for Chain implementations
"""
def _check(self):
return hasattr(self.base_estimator, attr) or all(
hasattr(est, attr) for est in self.estimators_
)
return available_if(_check)
class _BaseChain(BaseEstimator, metaclass=ABCMeta):
    # Shared machinery for ClassifierChain / RegressorChain: fits one
    # estimator per output column, feeding each link the original features
    # plus the (true or cross-validated) predictions of earlier links.
    def __init__(self, base_estimator, *, order=None, cv=None, random_state=None):
        self.base_estimator = base_estimator
        self.order = order
        self.cv = cv
        self.random_state = random_state
    @abstractmethod
    def fit(self, X, Y, **fit_params):
        """Fit the model to data matrix X and targets Y.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Y : array-like of shape (n_samples, n_classes)
            The target values.
        **fit_params : dict of string -> object
            Parameters passed to the `fit` method of each step.
            .. versionadded:: 0.23
        Returns
        -------
        self : object
        """
        X, Y = self._validate_data(X, Y, multi_output=True, accept_sparse=True)
        random_state = check_random_state(self.random_state)
        # Normalize the requested chain order into ``order_`` (an index array
        # over the columns of Y).
        self.order_ = self.order
        if isinstance(self.order_, tuple):
            self.order_ = np.array(self.order_)
        if self.order_ is None:
            self.order_ = np.array(range(Y.shape[1]))
        elif isinstance(self.order_, str):
            if self.order_ == "random":
                self.order_ = random_state.permutation(Y.shape[1])
            # NOTE(review): any string other than "random" is silently
            # accepted here and will only fail later — consider validating.
        elif sorted(self.order_) != list(range(Y.shape[1])):
            raise ValueError("invalid order")
        self.estimators_ = [clone(self.base_estimator) for _ in range(Y.shape[1])]
        if self.cv is None:
            # No cross-validation: augment X with the *true* labels (in chain
            # order) so each link trains on its predecessors' ground truth.
            Y_pred_chain = Y[:, self.order_]
            if sp.issparse(X):
                X_aug = sp.hstack((X, Y_pred_chain), format="lil")
                X_aug = X_aug.tocsr()
            else:
                X_aug = np.hstack((X, Y_pred_chain))
        elif sp.issparse(X):
            # With cv, start from zero-filled columns which are overwritten
            # below with out-of-fold predictions, one link at a time.
            # LIL format keeps the per-column assignments cheap.
            Y_pred_chain = sp.lil_matrix((X.shape[0], Y.shape[1]))
            X_aug = sp.hstack((X, Y_pred_chain), format="lil")
        else:
            Y_pred_chain = np.zeros((X.shape[0], Y.shape[1]))
            X_aug = np.hstack((X, Y_pred_chain))
        del Y_pred_chain
        for chain_idx, estimator in enumerate(self.estimators_):
            y = Y[:, self.order_[chain_idx]]
            # Each link sees X plus the columns produced by earlier links.
            estimator.fit(X_aug[:, : (X.shape[1] + chain_idx)], y, **fit_params)
            if self.cv is not None and chain_idx < len(self.estimators_) - 1:
                col_idx = X.shape[1] + chain_idx
                # Fill this link's column with out-of-fold predictions so the
                # next link trains on realistic (non-leaked) inputs.
                cv_result = cross_val_predict(
                    self.base_estimator, X_aug[:, :col_idx], y=y, cv=self.cv
                )
                if sp.issparse(X_aug):
                    X_aug[:, col_idx] = np.expand_dims(cv_result, 1)
                else:
                    X_aug[:, col_idx] = cv_result
        return self
    def predict(self, X):
        """Predict on the data matrix X using the ClassifierChain model.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Returns
        -------
        Y_pred : array-like of shape (n_samples, n_classes)
            The predicted values.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse=True, reset=False)
        Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
        for chain_idx, estimator in enumerate(self.estimators_):
            # Feed each link the predictions already made by earlier links.
            previous_predictions = Y_pred_chain[:, :chain_idx]
            if sp.issparse(X):
                if chain_idx == 0:
                    X_aug = X
                else:
                    X_aug = sp.hstack((X, previous_predictions))
            else:
                X_aug = np.hstack((X, previous_predictions))
            Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
        # Invert the chain permutation so columns match the original Y layout.
        inv_order = np.empty_like(self.order_)
        inv_order[self.order_] = np.arange(len(self.order_))
        Y_pred = Y_pred_chain[:, inv_order]
        return Y_pred
class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain):
    """A multi-label model that arranges binary classifiers into a chain.
    Each model makes a prediction in the order specified by the chain using
    all of the available features provided to the model plus the predictions
    of models that are earlier in the chain.
    Read more in the :ref:`User Guide <classifierchain>`.
    .. versionadded:: 0.19
    Parameters
    ----------
    base_estimator : estimator
        The base estimator from which the classifier chain is built.
    order : array-like of shape (n_outputs,) or 'random', default=None
        If None, the order will be determined by the order of columns in
        the label matrix Y.::
            order = [0, 1, 2, ..., Y.shape[1] - 1]
        The order of the chain can be explicitly set by providing a list of
        integers. For example, for a chain of length 5.::
            order = [1, 3, 2, 4, 0]
        means that the first model in the chain will make predictions for
        column 1 in the Y matrix, the second model will make predictions
        for column 3, etc.
        If order is 'random' a random ordering will be used.
    cv : int, cross-validation generator or an iterable, default=None
        Determines whether to use cross validated predictions or true
        labels for the results of previous estimators in the chain.
        Possible inputs for cv are:
        - None, to use true labels when fitting,
        - integer, to specify the number of folds in a (Stratified)KFold,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
    random_state : int, RandomState instance or None, optional (default=None)
        If ``order='random'``, determines random number generation for the
        chain order.
        In addition, it controls the random seed given at each `base_estimator`
        at each chaining iteration. Thus, it is only used when `base_estimator`
        exposes a `random_state`.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    classes_ : list
        A list of arrays of length ``len(estimators_)`` containing the
        class labels for each estimator in the chain.
    estimators_ : list
        A list of clones of base_estimator.
    order_ : list
        The order of labels in the classifier chain.
    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying `base_estimator` exposes such an attribute when fit.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn.datasets import make_multilabel_classification
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.multioutput import ClassifierChain
    >>> X, Y = make_multilabel_classification(
    ...    n_samples=12, n_classes=3, random_state=0
    ... )
    >>> X_train, X_test, Y_train, Y_test = train_test_split(
    ...    X, Y, random_state=0
    ... )
    >>> base_lr = LogisticRegression(solver='lbfgs', random_state=0)
    >>> chain = ClassifierChain(base_lr, order='random', random_state=0)
    >>> chain.fit(X_train, Y_train).predict(X_test)
    array([[1., 1., 0.],
           [1., 0., 0.],
           [0., 1., 0.]])
    >>> chain.predict_proba(X_test)
    array([[0.8387..., 0.9431..., 0.4576...],
           [0.8878..., 0.3684..., 0.2640...],
           [0.0321..., 0.9935..., 0.0625...]])
    See Also
    --------
    RegressorChain : Equivalent for regression.
    MultiOutputClassifier : Classifies each output independently rather than
        chaining.
    References
    ----------
    Jesse Read, Bernhard Pfahringer, Geoff Holmes, Eibe Frank, "Classifier
    Chains for Multi-label Classification", 2009.
    """
    def fit(self, X, Y):
        """Fit the model to data matrix X and targets Y.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Y : array-like of shape (n_samples, n_classes)
            The target values.
        Returns
        -------
        self : object
        """
        super().fit(X, Y)
        # Collect the class labels of every link, in chain order.
        self.classes_ = [
            estimator.classes_ for chain_idx, estimator in enumerate(self.estimators_)
        ]
        return self
    @_available_if_base_estimator_has("predict_proba")
    def predict_proba(self, X):
        """Predict probability estimates.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Returns
        -------
        Y_prob : array-like of shape (n_samples, n_classes)
        """
        X = self._validate_data(X, accept_sparse=True, reset=False)
        Y_prob_chain = np.zeros((X.shape[0], len(self.estimators_)))
        Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
        for chain_idx, estimator in enumerate(self.estimators_):
            previous_predictions = Y_pred_chain[:, :chain_idx]
            if sp.issparse(X):
                X_aug = sp.hstack((X, previous_predictions))
            else:
                X_aug = np.hstack((X, previous_predictions))
            # Column 1 is taken as the positive-class probability; assumes
            # each link is a binary classifier — TODO confirm class layout.
            Y_prob_chain[:, chain_idx] = estimator.predict_proba(X_aug)[:, 1]
            Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
        # Map chain-ordered columns back to the original label order.
        inv_order = np.empty_like(self.order_)
        inv_order[self.order_] = np.arange(len(self.order_))
        Y_prob = Y_prob_chain[:, inv_order]
        return Y_prob
    @_available_if_base_estimator_has("decision_function")
    def decision_function(self, X):
        """Evaluate the decision_function of the models in the chain.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
        Returns
        -------
        Y_decision : array-like of shape (n_samples, n_classes)
            Returns the decision function of the sample for each model
            in the chain.
        """
        # NOTE(review): unlike predict_proba, this does not call
        # _validate_data on X — confirm whether that is intentional.
        Y_decision_chain = np.zeros((X.shape[0], len(self.estimators_)))
        Y_pred_chain = np.zeros((X.shape[0], len(self.estimators_)))
        for chain_idx, estimator in enumerate(self.estimators_):
            previous_predictions = Y_pred_chain[:, :chain_idx]
            if sp.issparse(X):
                X_aug = sp.hstack((X, previous_predictions))
            else:
                X_aug = np.hstack((X, previous_predictions))
            Y_decision_chain[:, chain_idx] = estimator.decision_function(X_aug)
            Y_pred_chain[:, chain_idx] = estimator.predict(X_aug)
        # Map chain-ordered columns back to the original label order.
        inv_order = np.empty_like(self.order_)
        inv_order[self.order_] = np.arange(len(self.order_))
        Y_decision = Y_decision_chain[:, inv_order]
        return Y_decision
    def _more_tags(self):
        return {"_skip_test": True, "multioutput_only": True}
class RegressorChain(MetaEstimatorMixin, RegressorMixin, _BaseChain):
    """A multi-label model that arranges regressions into a chain.
    Each model makes a prediction in the order specified by the chain using
    all of the available features provided to the model plus the predictions
    of models that are earlier in the chain.
    Read more in the :ref:`User Guide <regressorchain>`.
    .. versionadded:: 0.20
    Parameters
    ----------
    base_estimator : estimator
        The base estimator from which the regressor chain is built.
    order : array-like of shape (n_outputs,) or 'random', default=None
        If None, the order will be determined by the order of columns in
        the label matrix Y.::
            order = [0, 1, 2, ..., Y.shape[1] - 1]
        The order of the chain can be explicitly set by providing a list of
        integers. For example, for a chain of length 5.::
            order = [1, 3, 2, 4, 0]
        means that the first model in the chain will make predictions for
        column 1 in the Y matrix, the second model will make predictions
        for column 3, etc.
        If order is 'random' a random ordering will be used.
    cv : int, cross-validation generator or an iterable, default=None
        Determines whether to use cross validated predictions or true
        labels for the results of previous estimators in the chain.
        Possible inputs for cv are:
        - None, to use true labels when fitting,
        - integer, to specify the number of folds in a (Stratified)KFold,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
    random_state : int, RandomState instance or None, optional (default=None)
        If ``order='random'``, determines random number generation for the
        chain order.
        In addition, it controls the random seed given at each `base_estimator`
        at each chaining iteration. Thus, it is only used when `base_estimator`
        exposes a `random_state`.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    estimators_ : list
        A list of clones of base_estimator.
    order_ : list
        The order of labels in the regressor chain.
    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying `base_estimator` exposes such an attribute when fit.
        .. versionadded:: 0.24
    Examples
    --------
    >>> from sklearn.multioutput import RegressorChain
    >>> from sklearn.linear_model import LogisticRegression
    >>> logreg = LogisticRegression(solver='lbfgs',multi_class='multinomial')
    >>> X, Y = [[1, 0], [0, 1], [1, 1]], [[0, 2], [1, 1], [2, 0]]
    >>> chain = RegressorChain(base_estimator=logreg, order=[0, 1]).fit(X, Y)
    >>> chain.predict(X)
    array([[0., 2.],
           [1., 1.],
           [2., 0.]])
    See Also
    --------
    ClassifierChain : Equivalent for classification.
    MultiOutputRegressor : Learns each output independently rather than
        chaining.
    """
    def fit(self, X, Y, **fit_params):
        """Fit the model to data matrix X and targets Y.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input data.
        Y : array-like of shape (n_samples, n_classes)
            The target values.
        **fit_params : dict of string -> object
            Parameters passed to the `fit` method at each step
            of the regressor chain.
            .. versionadded:: 0.23
        Returns
        -------
        self : object
        """
        # All of the chaining logic lives in _BaseChain.fit.
        super().fit(X, Y, **fit_params)
        return self
    def _more_tags(self):
        return {"multioutput_only": True}
| |
"""
mod_sample Models
===================
In this module, we are trying to maintain database regarding various
sample, ExtraFile, ForbiddenExtension, ForbiddenMimeType, Issue
"""
from sqlalchemy import Column, Integer, String, Text, ForeignKey, DateTime
from sqlalchemy.orm import relationship
from database import Base, DeclEnum
from datetime import datetime
class Sample(Base):
    """Database model describing an uploaded sample file."""
    __tablename__ = 'sample'
    __table_args__ = {'mysql_engine': 'InnoDB'}
    id = Column(Integer, primary_key=True)
    sha = Column(String(128), unique=True)
    extension = Column(String(64), nullable=False)
    original_name = Column(Text(), nullable=False)
    extra_files = relationship('ExtraFile', back_populates='sample')
    tests = relationship('RegressionTest', back_populates='sample')
    upload = relationship('Upload', uselist=False, back_populates='sample')

    def __init__(self, sha, extension, original_name):
        """
        Store the hash, extension and original name of a sample.

        :param sha: content hash identifying the sample
        :type sha: str
        :param extension: file extension, without a leading dot
        :type extension: str
        :param original_name: name the file had when it was uploaded
        :type original_name: str
        """
        self.sha = sha
        self.extension = extension
        self.original_name = original_name

    def __repr__(self):
        """
        Represent the sample by its hash.

        :return: string containing the 'sha' field of this Sample
        :rtype: str
        """
        return '<Sample {hash}>'.format(hash=self.sha)

    @property
    def filename(self):
        """
        Return the full filename of the sample (hash plus extension).
        """
        suffix = "." + self.extension if self.extension else ""
        return "{sha}{extension}".format(sha=self.sha, extension=suffix)
class ExtraFile(Base):
    """Database model for an additional file attached to a Sample."""
    __tablename__ = 'sample_extra'
    __table_args__ = {'mysql_engine': 'InnoDB'}
    id = Column(Integer, primary_key=True)
    sample_id = Column(Integer, ForeignKey('sample.id', onupdate="CASCADE", ondelete="CASCADE"))
    sample = relationship('Sample', uselist=False, back_populates='extra_files')
    original_name = Column(Text(), nullable=False)
    extension = Column(String(64), nullable=False)
    # Number of leading hash characters used to build ``short_name``.
    SHORT_NAME_LENGTH = 5

    def __init__(self, sample_id, extension, original_name):
        """
        Parametrized constructor for the ExtraFile model
        :param sample_id: The value of the 'sample_id' field of ExtraFile model
        :type sample_id: int
        :param extension: The value of the 'extension' field of ExtraFile model
        :type extension: str
        :param original_name: The value of the 'original_name' field of ExtraFile model
        :type original_name: str
        """
        self.sample_id = sample_id
        self.extension = extension
        self.original_name = original_name

    def __repr__(self):
        """
        Representation function
        Represent a ExtraFile Model by its 'sample_id' Field.
        :return: Returns the string containing the 'sample_id' field of the ExtraFile model
        :rtype: str
        """
        return '<Sample extra for {id}>'.format(id=self.sample_id)

    @property
    def short_name(self):
        """
        Return the short name of an additional file.

        Fix: this getter previously declared a ``length=5`` parameter while
        decorated with ``@property`` — a property getter is only ever called
        with ``self``, so the parameter could never be supplied and always
        defaulted to 5. The unusable parameter is replaced by the
        ``SHORT_NAME_LENGTH`` class constant; behaviour is unchanged.

        :return: A short name consisting of the first characters of the hash, the id and the file extension.
        :rtype: str
        """
        return "{short}_{id}.{extension}".format(
            short=self.sample.sha[:self.SHORT_NAME_LENGTH], id=self.id,
            extension=self.extension
        )

    @property
    def filename(self):
        """
        Function to return filename
        :return: Returns the full name of the file using the hash, id and file extension.
        :rtype: str
        """
        extension = ("." + self.extension) if len(self.extension) > 0 else ""
        return "{sha}_{id}{extension}".format(sha=self.sample.sha, id=self.id, extension=extension)
class ForbiddenExtension(Base):
    """Database model storing file extensions that may not be uploaded."""
    __tablename__ = 'extension_forbidden'
    __table_args__ = {'mysql_engine': 'InnoDB'}
    extension = Column(String(32), primary_key=True)

    def __init__(self, extension):
        """
        Store the forbidden extension.

        :param extension: the file extension (primary key) to forbid
        :type extension: str
        """
        self.extension = extension

    def __repr__(self):
        """
        Represent the model by its 'extension' field.

        :return: string containing the forbidden extension
        :rtype: str
        """
        return '<Forbidden extension {extension}>'.format(extension=self.extension)
class ForbiddenMimeType(Base):
    """Database model storing MIME types that may not be uploaded."""
    __tablename__ = 'mimetype_forbidden'
    __table_args__ = {'mysql_engine': 'InnoDB'}
    mimetype = Column(String(64), primary_key=True)

    def __init__(self, mimetype):
        """
        Store the forbidden MIME type.

        :param mimetype: the MIME type (primary key) to forbid
        :type mimetype: str
        """
        self.mimetype = mimetype

    def __repr__(self):
        """
        Represent the model by its 'mimetype' field.

        :return: string containing the forbidden MIME type
        :rtype: str
        """
        return '<Forbidden MimeType {mime}>'.format(mime=self.mimetype)
class Issue(Base):
    """Database model for a tracker issue associated with a sample."""
    __tablename__ = 'sample_issue'
    __table_args__ = {'mysql_engine': 'InnoDB'}
    id = Column(Integer, primary_key=True)
    sample_id = Column(Integer, ForeignKey('sample.id', onupdate="CASCADE",
                                           ondelete="CASCADE"))
    sample = relationship('Sample', uselist=False)
    issue_id = Column(Integer, nullable=False)
    title = Column(Text(), nullable=False)
    user = Column(Text(), nullable=False)
    created_at = Column(DateTime(timezone=True), nullable=False)
    status = Column(Text(), nullable=False)

    def __init__(self, sample_id, issue_id, date, title, user, status):
        """
        Store the issue metadata, parsing *date* into a datetime.

        :param sample_id: id of the sample this issue refers to
        :type sample_id: int
        :param issue_id: identifier of the issue on the tracker
        :type issue_id: int
        :param date: creation timestamp in '%Y-%m-%dT%H:%M:%SZ' form
        :type date: str
        :param title: issue title
        :type title: str
        :param user: user who opened the issue
        :type user: str
        :param status: current status of the issue
        :type status: str
        """
        self.sample_id = sample_id
        self.issue_id = issue_id
        # Timestamps arrive as ISO-8601 strings with a trailing 'Z'.
        self.created_at = datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')
        self.title = title
        self.user = user
        self.status = status
| |
from collections import namedtuple, defaultdict
import copy
import os
import sys
from itertools import permutations, takewhile
from contextlib import contextmanager
import numpy as np
from llvmlite import ir as llvmir
import llvmlite.llvmpy.core as lc
from llvmlite.llvmpy.core import Type, Constant, LLVMException
import llvmlite.binding as ll
from numba.core import types, utils, typing, datamodel, debuginfo, funcdesc, config, cgutils, imputils
from numba.core import event
from numba import _dynfunc, _helperlib
from numba.core.compiler_lock import global_compiler_lock
from numba.core.pythonapi import PythonAPI
from numba.core.imputils import (user_function, user_generator,
builtin_registry, impl_ret_borrowed,
RegistryLoader)
from numba.cpython import builtins
# Convenience LLVM type aliases: an i8* serves as the generic opaque pointer
# used throughout the lowering code, including for PyObject*.
GENERIC_POINTER = Type.pointer(Type.int(8))
PYOBJECT = GENERIC_POINTER
void_ptr = GENERIC_POINTER
class OverloadSelector(object):
    """
    An object matching an actual signature against a registry of formal
    signatures and choosing the best candidate, if any.
    In the current implementation:
    - a "signature" is a tuple of type classes or type instances
    - the "best candidate" is the most specific match
    """

    def __init__(self):
        # Registered (formal args tuple, value) pairs, in insertion order.
        self.versions = []
        # Memoized results of find(), keyed by the actual signature.
        self._cache = {}

    def find(self, sig):
        hit = self._cache.get(sig)
        if hit is None:
            hit = self._find(sig)
            self._cache[sig] = hit
        return hit

    def _find(self, sig):
        compatible = self._select_compatible(sig)
        if not compatible:
            raise NotImplementedError(self, sig)
        return compatible[self._best_signature(compatible)]

    def _select_compatible(self, sig):
        """
        Select all compatible signatures and their implementation.
        """
        return {formal_sig: impl
                for formal_sig, impl in self.versions
                if self._match_arglist(formal_sig, sig)}

    def _best_signature(self, candidates):
        """
        Return the most specific signature among *candidates*; raise
        TypeError when several candidates tie for the lowest genericity.
        """
        ordered, genericity = self._sort_signatures(candidates)
        # check for ambiguous signatures
        if len(ordered) > 1:
            best_score = genericity[ordered[0]]
            # Equal scores are contiguous at the front of the sorted list.
            tied = [s for s in ordered if genericity[s] == best_score]
            if len(tied) > 1:
                lines = ["{n} ambiguous signatures".format(n=len(tied))]
                for s in tied:
                    lines.append("{0} => {1}".format(s, candidates[s]))
                raise TypeError('\n'.join(lines))
        return ordered[0]

    def _sort_signatures(self, candidates):
        """
        Sort signatures in ascending level of genericity.
        Returns a 2-tuple:
        * ordered list of signatures
        * dictionary containing genericity scores
        """
        # A signature scores +1 for every other candidate it also matches.
        genericity = defaultdict(int)
        for this, other in permutations(candidates.keys(), r=2):
            if self._match_arglist(formal_args=this, actual_args=other):
                genericity[this] += 1
        ordered = sorted(candidates.keys(), key=lambda s: genericity[s])
        return ordered, genericity

    def _match_arglist(self, formal_args, actual_args):
        """
        Returns True if the signature is "matching".
        A formal signature is "matching" if the actual signature matches
        exactly or if the formal signature is a compatible generic signature.
        """
        # Expand a trailing VarArg to the actual argument count.
        if formal_args and isinstance(formal_args[-1], types.VarArg):
            extra = len(actual_args) - len(formal_args) + 1
            formal_args = formal_args[:-1] + (formal_args[-1].dtype,) * extra
        return (len(formal_args) == len(actual_args)
                and all(self._match(formal, actual)
                        for formal, actual in zip(formal_args, actual_args)))

    def _match(self, formal, actual):
        if formal == actual:
            # formal argument matches actual arguments
            return True
        if types.Any == formal:
            # formal argument is any
            return True
        if isinstance(formal, type) and issubclass(formal, types.Type):
            if isinstance(actual, type) and issubclass(actual, formal):
                # formal arg is a type class and actual arg is a subclass
                return True
            if isinstance(actual, formal):
                # formal arg is a type class of which actual arg is an instance
                return True

    def append(self, value, sig):
        """
        Add a formal signature and its associated value.
        """
        assert isinstance(sig, tuple), (value, sig)
        self.versions.append((sig, value))
        self._cache.clear()
@utils.runonce
def _load_global_helpers():
    """
    Execute once to install special symbols into the LLVM symbol table.
    """
    # This is Py_None's real C name; in CPython id() yields the object's
    # address, which is the value the generated code must link against.
    ll.add_symbol("_Py_NoneStruct", id(None))
    # Add Numba C helper functions
    for c_helpers in (_helperlib.c_helpers, _dynfunc.c_helpers):
        for py_name, c_address in c_helpers.items():
            c_name = "numba_" + py_name
            ll.add_symbol(c_name, c_address)
    # Add Numpy C helpers (npy_XXX)
    for c_name, c_address in _helperlib.npymath_exports.items():
        ll.add_symbol(c_name, c_address)
    # Add all built-in exception classes
    for obj in utils.builtins.__dict__.values():
        if isinstance(obj, type) and issubclass(obj, BaseException):
            ll.add_symbol("PyExc_%s" % (obj.__name__), id(obj))
class BaseContext(object):
"""
Notes on Structure
------------------
Most objects are lowered as plain-old-data structure in the generated
llvm. They are passed around by reference (a pointer to the structure).
Only POD structure can live across function boundaries by copying the
data.
"""
    # True if the target requires strict alignment
    # Causes exception to be raised if the record members are not aligned.
    strict_alignment = False
    # Force powi implementation as math.pow call
    implement_powi_as_math_call = False
    implement_pow_as_math_call = False
    # Emit Debug info
    enable_debuginfo = False
    DIBuilder = debuginfo.DIBuilder
    # Bound checking
    @property
    def enable_boundscheck(self):
        """Whether bounds checking is enabled; a non-None config.BOUNDSCHECK
        overrides the per-context flag set through the setter."""
        if config.BOUNDSCHECK is not None:
            return config.BOUNDSCHECK
        return self._boundscheck
    @enable_boundscheck.setter
    def enable_boundscheck(self, value):
        self._boundscheck = value
    # NRT (Numba runtime; memory management)
    enable_nrt = False
    # Auto parallelization
    auto_parallel = False
    # PYCC (ahead-of-time compilation mode)
    aot_mode = False
    # Error model for various operations (only FP exceptions currently)
    error_model = None
    # Whether dynamic globals (CPU runtime addresses) is allowed
    allow_dynamic_globals = False
    # Fast math flags
    fastmath = False
    # python execution environment
    environment = None
    # the function descriptor
    fndesc = None
    def __init__(self, typing_context, target):
        """
        Parameters
        ----------
        typing_context :
            The typing context paired with this lowering context.
        target : str
            Name of the target, looked up in the target registry.
        """
        _load_global_helpers()
        self.address_size = utils.MACHINE_BITS
        self.typing_context = typing_context
        # Function-level import; presumably avoids an import cycle at module
        # load time — TODO confirm.
        from numba.core.target_extension import target_registry
        self.target_name = target
        self.target = target_registry[target]
        # A mapping of installed registries to their loaders
        self._registries = {}
        # Declarations loaded from registries and other sources
        self._defns = defaultdict(OverloadSelector)
        self._getattrs = defaultdict(OverloadSelector)
        self._setattrs = defaultdict(OverloadSelector)
        self._casts = OverloadSelector()
        self._get_constants = OverloadSelector()
        # Other declarations
        self._generators = {}
        self.special_ops = {}
        self.cached_internal_func = {}
        self._pid = None
        self._codelib_stack = []
        self._boundscheck = False
        self.data_model_manager = datamodel.default_manager
        # Initialize
        self.init()
    def init(self):
        """
        Initialization hook called at the end of __init__;
        for subclasses to add initializer
        """
    def refresh(self):
        """
        Refresh context with new declarations from known registries.
        Useful for third-party extensions.
        """
        # load target specific registries
        self.load_additional_registries()
        # Populate the builtin registry, this has to happen after loading
        # additional registries as some of the "additional" registries write
        # their implementations into the builtin_registry and would be missed if
        # this ran first.
        self.install_registry(builtin_registry)
        # Also refresh typing context, since @overload declarations can
        # affect it.
        self.typing_context.refresh()
    def load_additional_registries(self):
        """
        Load target-specific registries. Can be overridden by subclasses.
        The base implementation is a no-op.
        """
    def mangler(self, name, types):
        """
        Perform name mangling of *name* with the argument *types*,
        delegating to funcdesc.default_mangler.
        """
        return funcdesc.default_mangler(name, types)
    def get_env_name(self, fndesc):
        """Get the environment name given a FunctionDescriptor.
        Use this instead of the ``fndesc.env_name`` so that the target-context
        can provide necessary mangling of the symbol to meet ABI requirements.
        The base implementation returns the name unmangled.
        """
        return fndesc.env_name
    def declare_env_global(self, module, envname):
        """Declare the Environment pointer as a global of the module.
        The pointer is initialized to NULL. It must be filled by the runtime
        with the actual address of the Env before the associated function
        can be executed.
        Parameters
        ----------
        module :
            The LLVM Module
        envname : str
            The name of the global variable.
        """
        if envname not in module.globals:
            gv = llvmir.GlobalVariable(module, cgutils.voidptr_t, name=envname)
            # 'common' linkage: duplicate definitions of the symbol across
            # modules get merged by the linker — TODO confirm intent.
            gv.linkage = 'common'
            gv.initializer = cgutils.get_null_value(gv.type.pointee)
        return module.globals[envname]
    def get_arg_packer(self, fe_args):
        """Return an ArgPacker for the given frontend argument types."""
        return datamodel.ArgPacker(self.data_model_manager, fe_args)
    def get_data_packer(self, fe_types):
        """Return a DataPacker for the given frontend types."""
        return datamodel.DataPacker(self.data_model_manager, fe_types)
    @property
    def target_data(self):
        # Abstract: concrete targets must override with their data layout.
        raise NotImplementedError
    @utils.cached_property
    def nonconst_module_attrs(self):
        """
        All module attrs are constant for targets using BaseContext,
        hence the empty tuple: no attribute is reported as non-constant.
        """
        return tuple()
    @utils.cached_property
    def nrt(self):
        """Lazily-created NRT helper for this context, constructed with the
        ``enable_nrt`` flag."""
        from numba.core.runtime.context import NRTContext
        return NRTContext(self, self.enable_nrt)
def subtarget(self, **kws):
obj = copy.copy(self) # shallow copy
for k, v in kws.items():
if not hasattr(obj, k):
raise NameError("unknown option {0!r}".format(k))
setattr(obj, k, v)
if obj.codegen() is not self.codegen():
# We can't share functions across different codegens
obj.cached_internal_func = {}
return obj
    def install_registry(self, registry):
        """
        Install a *registry* (a imputils.Registry instance) of function
        and attribute implementations.
        """
        try:
            # Reuse an existing loader so only new registrations are pulled.
            loader = self._registries[registry]
        except KeyError:
            loader = RegistryLoader(registry)
            self._registries[registry] = loader
        self.insert_func_defn(loader.new_registrations('functions'))
        self._insert_getattr_defn(loader.new_registrations('getattrs'))
        self._insert_setattr_defn(loader.new_registrations('setattrs'))
        self._insert_cast_defn(loader.new_registrations('casts'))
        self._insert_get_constant_defn(loader.new_registrations('constants'))
    def insert_func_defn(self, defns):
        # Register each (implementation, function, signature) triple with the
        # overload selector of its function.
        for impl, func, sig in defns:
            self._defns[func].append(impl, sig)
    def _insert_getattr_defn(self, defns):
        # Register attribute-getter implementations, keyed by attribute name.
        for impl, attr, sig in defns:
            self._getattrs[attr].append(impl, sig)
    def _insert_setattr_defn(self, defns):
        # Register attribute-setter implementations, keyed by attribute name.
        for impl, attr, sig in defns:
            self._setattrs[attr].append(impl, sig)
    def _insert_cast_defn(self, defns):
        # Register cast implementations with the shared cast selector.
        for impl, sig in defns:
            self._casts.append(impl, sig)
    def _insert_get_constant_defn(self, defns):
        # Register constant-materialization implementations.
        for impl, sig in defns:
            self._get_constants.append(impl, sig)
    def insert_user_function(self, func, fndesc, libs=()):
        """Register the implementation of *func* described by *fndesc*,
        linking the extra code libraries in *libs*."""
        impl = user_function(fndesc, libs)
        self._defns[func].append(impl, impl.signature)
def add_user_function(self, func, fndesc, libs=()):
if func not in self._defns:
msg = "{func} is not a registered user function"
raise KeyError(msg.format(func=func))
impl = user_function(fndesc, libs)
self._defns[func].append(impl, impl.signature)
def insert_generator(self, genty, gendesc, libs=()):
assert isinstance(genty, types.Generator)
impl = user_generator(gendesc, libs)
self._generators[genty] = gendesc, impl
def remove_user_function(self, func):
"""
Remove user function *func*.
KeyError is raised if the function isn't known to us.
"""
del self._defns[func]
def get_external_function_type(self, fndesc):
    # LLVM function type for an externally-defined (C ABI) function.
    argtypes = [self.get_argument_type(aty)
                for aty in fndesc.argtypes]
    # don't wrap in pointer
    restype = self.get_argument_type(fndesc.restype)
    fnty = Type.function(restype, argtypes)
    return fnty

def declare_function(self, module, fndesc):
    # Declare (or fetch) *fndesc*'s function in *module* using this
    # target's calling convention, applying noalias/inline attributes.
    fnty = self.call_conv.get_function_type(fndesc.restype, fndesc.argtypes)
    fn = cgutils.get_or_insert_function(module, fnty, fndesc.mangled_name)
    self.call_conv.decorate_function(fn, fndesc.args, fndesc.argtypes, noalias=fndesc.noalias)
    if fndesc.inline:
        fn.attributes.add('alwaysinline')
    return fn

def declare_external_function(self, module, fndesc):
    # Declare an external (C ABI) function and name its arguments for
    # readability of the generated IR.
    fnty = self.get_external_function_type(fndesc)
    fn = cgutils.get_or_insert_function(module, fnty, fndesc.mangled_name)
    assert fn.is_declaration
    for ak, av in zip(fndesc.args, fn.args):
        av.name = "arg.%s" % ak
    return fn
def insert_const_string(self, mod, string):
    """
    Insert constant *string* (a str object) into module *mod*.
    """
    # NUL-terminated UTF-8 bytes, returned as a generic i8*.
    stringtype = GENERIC_POINTER
    name = ".const.%s" % string
    text = cgutils.make_bytearray(string.encode("utf-8") + b"\x00")
    gv = self.insert_unique_const(mod, name, text)
    return Constant.bitcast(gv, stringtype)

def insert_const_bytes(self, mod, bytes, name=None):
    """
    Insert constant *byte* (a `bytes` object) into module *mod*.
    """
    stringtype = GENERIC_POINTER
    # Fall back to the hash when no symbol name is supplied.
    name = ".bytes.%s" % (name or hash(bytes))
    text = cgutils.make_bytearray(bytes)
    gv = self.insert_unique_const(mod, name, text)
    return Constant.bitcast(gv, stringtype)

def insert_unique_const(self, mod, name, val):
    """
    Insert a unique internal constant named *name*, with LLVM value
    *val*, into module *mod*.
    """
    # Reuse the existing global if one with this name already exists.
    try:
        gv = mod.get_global(name)
    except KeyError:
        return cgutils.global_constant(mod, name, val)
    else:
        return gv
def get_argument_type(self, ty):
    # LLVM type used when *ty* is passed as an argument.
    return self.data_model_manager[ty].get_argument_type()

def get_return_type(self, ty):
    # LLVM type used when *ty* is returned from a function.
    return self.data_model_manager[ty].get_return_type()

def get_data_type(self, ty):
    """
    Get a LLVM data representation of the Numba type *ty* that is safe
    for storage. Record data are stored as byte array.
    The return value is a llvmlite.ir.Type object, or None if the type
    is an opaque pointer (???).
    """
    return self.data_model_manager[ty].get_data_type()

def get_value_type(self, ty):
    # LLVM type of a live (register) value of *ty*.
    return self.data_model_manager[ty].get_value_type()

def pack_value(self, builder, ty, value, ptr, align=None):
    """
    Pack value into the array storage at *ptr*.
    If *align* is given, it is the guaranteed alignment for *ptr*
    (by default, the standard ABI alignment).
    """
    dataval = self.data_model_manager[ty].as_data(builder, value)
    builder.store(dataval, ptr, align=align)

def unpack_value(self, builder, ty, ptr, align=None):
    """
    Unpack value from the array storage at *ptr*.
    If *align* is given, it is the guaranteed alignment for *ptr*
    (by default, the standard ABI alignment).
    """
    dm = self.data_model_manager[ty]
    return dm.load_from_data_pointer(builder, ptr, align)
def get_constant_generic(self, builder, ty, val):
    """
    Return a LLVM constant representing value *val* of Numba type *ty*.
    """
    try:
        impl = self._get_constants.find((ty,))
        return impl(self, builder, ty, val)
    except NotImplementedError:
        raise NotImplementedError("Cannot lower constant of type '%s'" % (ty,))

def get_constant(self, ty, val):
    """
    Same as get_constant_generic(), but without specifying *builder*.
    Works only for simple types.
    """
    # HACK: pass builder=None to preserve get_constant() API
    return self.get_constant_generic(None, ty, val)

def get_constant_undef(self, ty):
    # LLVM 'undef' of the value type for *ty*.
    lty = self.get_value_type(ty)
    return Constant.undef(lty)

def get_constant_null(self, ty):
    # LLVM zero-initializer of the value type for *ty*.
    lty = self.get_value_type(ty)
    return Constant.null(lty)
def get_function(self, fn, sig, _firstcall=True):
    """
    Return the implementation of function *fn* for signature *sig*.
    The return value is a callable with the signature (builder, args).

    Lookup order: exact overload -> type-class overload (for type
    instances) -> refresh registries once and retry (_firstcall guard
    prevents infinite recursion).
    """
    assert sig is not None
    sig = sig.as_function()
    if isinstance(fn, (types.Function, types.BoundFunction,
                       types.Dispatcher)):
        key = fn.get_impl_key(sig)
        overloads = self._defns[key]
    else:
        key = fn
        overloads = self._defns[key]
    try:
        return _wrap_impl(overloads.find(sig.args), self, sig)
    except NotImplementedError:
        pass
    if isinstance(fn, types.Type):
        # It's a type instance => try to find a definition for the type class
        try:
            return self.get_function(type(fn), sig)
        except NotImplementedError:
            # Raise exception for the type instance, for a better error message
            pass
    # Automatically refresh the context to load new registries if we are
    # calling the first time.
    if _firstcall:
        self.refresh()
        return self.get_function(fn, sig, _firstcall=False)
    raise NotImplementedError("No definition for lowering %s%s" % (key, sig))
def get_generator_desc(self, genty):
    """Return the descriptor registered for generator type *genty*.
    """
    return self._generators[genty][0]

def get_generator_impl(self, genty):
    """Return the implementation registered for generator type *genty*,
    linking any libraries the implementation carries.
    """
    res = self._generators[genty][1]
    self.add_linking_libs(getattr(res, 'libs', ()))
    return res

def get_bound_function(self, builder, obj, ty):
    # A bound function is represented by its receiver value itself.
    assert self.get_value_type(ty) == obj.type
    return obj
def get_getattr(self, typ, attr):
    """
    Get the getattr() implementation for the given type and attribute name.
    The return value is a callable with the signature
    (context, builder, typ, val, attr).
    """
    const_attr = (typ, attr) not in self.nonconst_module_attrs
    is_module = isinstance(typ, types.Module)
    if is_module and const_attr:
        # Implement getattr for module-level globals that we treat as
        # constants.
        # XXX We shouldn't have to retype this
        attrty = self.typing_context.resolve_module_constants(typ, attr)
        if attrty is None or isinstance(attrty, types.Dummy):
            # No implementation required for dummies (functions, modules...),
            # which are dealt with later
            return None
        else:
            pyval = getattr(typ.pymod, attr)
            def imp(context, builder, typ, val, attr):
                # Lower the Python value as a constant of the resolved type.
                llval = self.get_constant_generic(builder, attrty, pyval)
                return impl_ret_borrowed(context, builder, attrty, llval)
            return imp
    # Lookup specific getattr implementation for this type and attribute
    overloads = self._getattrs[attr]
    try:
        return overloads.find((typ,))
    except NotImplementedError:
        pass
    # Lookup generic getattr implementation for this type
    overloads = self._getattrs[None]
    try:
        return overloads.find((typ,))
    except NotImplementedError:
        pass
    raise NotImplementedError("No definition for lowering %s.%s" % (typ, attr))
def get_setattr(self, attr, sig):
    """
    Get the setattr() implementation for the given attribute name
    and signature.
    The return value is a callable with the signature (builder, args).
    """
    # sig.args is (target type, value type).
    assert len(sig.args) == 2
    typ = sig.args[0]
    valty = sig.args[1]

    def wrap_setattr(impl):
        # Close over *sig* and *attr* so the result only needs
        # (builder, args).
        def wrapped(builder, args):
            return impl(self, builder, sig, args, attr)
        return wrapped

    # Lookup specific setattr implementation for this type and attribute
    overloads = self._setattrs[attr]
    try:
        return wrap_setattr(overloads.find((typ, valty)))
    except NotImplementedError:
        pass
    # Lookup generic setattr implementation for this type
    overloads = self._setattrs[None]
    try:
        return wrap_setattr(overloads.find((typ, valty)))
    except NotImplementedError:
        pass
    raise NotImplementedError("No definition for lowering %s.%s = %s"
                              % (typ, attr, valty))
def get_argument_value(self, builder, ty, val):
    """
    Argument representation to local value representation
    """
    return self.data_model_manager[ty].from_argument(builder, val)

def get_returned_value(self, builder, ty, val):
    """
    Return value representation to local value representation
    """
    return self.data_model_manager[ty].from_return(builder, val)

def get_return_value(self, builder, ty, val):
    """
    Local value representation to return type representation
    """
    return self.data_model_manager[ty].as_return(builder, val)

def get_value_as_argument(self, builder, ty, val):
    """Prepare local value representation as argument type representation
    """
    return self.data_model_manager[ty].as_argument(builder, val)

def get_value_as_data(self, builder, ty, val):
    # Local value representation to storage (data) representation.
    return self.data_model_manager[ty].as_data(builder, val)

def get_data_as_value(self, builder, ty, val):
    # Storage (data) representation to local value representation.
    return self.data_model_manager[ty].from_data(builder, val)

def pair_first(self, builder, val, ty):
    """
    Extract the first element of a heterogeneous pair.
    """
    pair = self.make_helper(builder, ty, val)
    return pair.first

def pair_second(self, builder, val, ty):
    """
    Extract the second element of a heterogeneous pair.
    """
    pair = self.make_helper(builder, ty, val)
    return pair.second
def cast(self, builder, val, fromty, toty):
    """
    Cast a value of type *fromty* to type *toty*.
    This implements implicit conversions as can happen due to the
    granularity of the Numba type system, or lax Python semantics.
    """
    # Identity casts and casts to types.Any are no-ops.
    if fromty == toty or toty == types.Any:
        return val
    try:
        impl = self._casts.find((fromty, toty))
        return impl(self, builder, fromty, toty, val)
    except NotImplementedError:
        raise NotImplementedError(
            "Cannot cast %s to %s: %s" % (fromty, toty, val))
def generic_compare(self, builder, key, argtypes, args):
    """
    Compare the given LLVM values of the given Numba types using
    the comparison *key* (e.g. '=='). The values are first cast to
    a common safe conversion type.
    """
    at, bt = argtypes
    av, bv = args
    ty = self.typing_context.unify_types(at, bt)
    assert ty is not None
    cav = self.cast(builder, av, at, ty)
    cbv = self.cast(builder, bv, bt, ty)
    fnty = self.typing_context.resolve_value_type(key)
    # the sig is homogeneous in the unified casted type
    cmpsig = fnty.get_call_type(self.typing_context, (ty, ty), {})
    cmpfunc = self.get_function(fnty, cmpsig)
    self.add_linking_libs(getattr(cmpfunc, 'libs', ()))
    return cmpfunc(builder, (cav, cbv))
def make_optional_none(self, builder, valtype):
    # Build an Optional(valtype) value in the "None" state (valid bit off).
    optval = self.make_helper(builder, types.Optional(valtype))
    optval.valid = cgutils.false_bit
    return optval._getvalue()

def make_optional_value(self, builder, valtype, value):
    # Build an Optional(valtype) wrapping *value* (valid bit on).
    optval = self.make_helper(builder, types.Optional(valtype))
    optval.valid = cgutils.true_bit
    optval.data = value
    return optval._getvalue()

def is_true(self, builder, typ, val):
    """
    Return the truth value of a value of the given Numba type.
    """
    # Resolve and call the bool() implementation for *typ*.
    fnty = self.typing_context.resolve_value_type(bool)
    sig = fnty.get_call_type(self.typing_context, (typ,), {})
    impl = self.get_function(fnty, sig)
    return impl(builder, (val,))
def get_c_value(self, builder, typ, name, dllimport=False):
    """
    Get a global value through its C-accessible *name*, with the given
    LLVM type.
    If *dllimport* is true, the symbol will be marked as imported
    from a DLL (necessary for AOT compilation under Windows).
    """
    module = builder.function.module
    try:
        gv = module.globals[name]
    except KeyError:
        # Not declared yet: create the global in this module.
        gv = cgutils.add_global_variable(module, typ, name)
        if dllimport and self.aot_mode and sys.platform == 'win32':
            gv.storage_class = "dllimport"
    return gv
def call_external_function(self, builder, callee, argtys, args):
    # Convert each value to its argument representation, then call.
    args = [self.get_value_as_argument(builder, ty, arg)
            for ty, arg in zip(argtys, args)]
    retval = builder.call(callee, args)
    return retval

def get_function_pointer_type(self, typ):
    # Storage type doubles as the function pointer type.
    return self.data_model_manager[typ].get_data_type()

def call_function_pointer(self, builder, funcptr, args, cconv=None):
    return builder.call(funcptr, args, cconv=cconv)

def print_string(self, builder, text):
    # Emit a call to the C library puts() with the given i8* text.
    mod = builder.module
    cstring = GENERIC_POINTER
    fnty = Type.function(Type.int(), [cstring])
    puts = cgutils.get_or_insert_function(mod, fnty, "puts")
    return builder.call(puts, [text])

def debug_print(self, builder, text):
    # Convenience: embed *text* as a constant string and puts() it.
    mod = builder.module
    cstr = self.insert_const_string(mod, str(text))
    self.print_string(builder, cstr)

def printf(self, builder, format_string, *args):
    # Emit a call to the C library printf(); *format_string* may be a
    # Python str (embedded as a constant) or an existing LLVM value.
    mod = builder.module
    if isinstance(format_string, str):
        cstr = self.insert_const_string(mod, format_string)
    else:
        cstr = format_string
    fnty = Type.function(Type.int(), (GENERIC_POINTER,), var_arg=True)
    fn = cgutils.get_or_insert_function(mod, fnty, "printf")
    return builder.call(fn, (cstr,) + tuple(args))

def get_struct_type(self, struct):
    """
    Get the LLVM struct type for the given Structure class *struct*.
    """
    fields = [self.get_value_type(v) for _, v in struct._fields]
    return Type.struct(fields)

def get_dummy_value(self):
    # Null pointer stand-in for types that carry no runtime data.
    return Constant.null(self.get_dummy_type())

def get_dummy_type(self):
    return GENERIC_POINTER
def _compile_subroutine_no_cache(self, builder, impl, sig, locals=None,
                                 flags=None):
    """
    Invoke the compiler to compile a function to be used inside a
    nopython function, but without generating code to call that
    function.

    Note this context's flags are not inherited.

    Parameters: *impl* is the Python callable to compile, *sig* its
    Numba signature; *locals* optionally maps local names to types;
    *flags* optionally overrides the compiler flags.
    Returns the CompileResult.
    """
    # Fix: the previous default was a shared mutable ``locals={}``;
    # use None and create a fresh dict per call instead.
    if locals is None:
        locals = {}
    # Compile
    from numba.core import compiler
    with global_compiler_lock:
        codegen = self.codegen()
        library = codegen.create_library(impl.__name__)
        if flags is None:
            cstk = utils.ConfigStack()
            flags = compiler.Flags()
            if cstk:
                tls_flags = cstk.top()
                # Propagate the thread-local NRT flag when set.
                if tls_flags.is_set("nrt") and tls_flags.nrt:
                    flags.nrt = True
        # Subroutines are never standalone entry points: skip final
        # compilation and wrapper generation.
        flags.no_compile = True
        flags.no_cpython_wrapper = True
        flags.no_cfunc_wrapper = True
        cres = compiler.compile_internal(self.typing_context, self,
                                         library,
                                         impl, sig.args,
                                         sig.return_type, flags,
                                         locals=locals)
        # Allow inlining the function inside callers.
        self.active_code_library.add_linking_library(cres.library)
        return cres
def compile_subroutine(self, builder, impl, sig, locals=None, flags=None,
                       caching=True):
    """
    Compile the function *impl* for the given *sig* (in nopython mode).
    Return an instance of CompileResult.

    If *caching* evaluates True, the function keeps the compiled function
    for reuse in *.cached_internal_func*.
    """
    # Fix: avoid the shared mutable default ``locals={}``; build a
    # fresh dict per call.
    if locals is None:
        locals = {}
    cache_key = (impl.__code__, sig, type(self.error_model))
    if not caching:
        cached = None
    else:
        if impl.__closure__:
            # XXX This obviously won't work if a cell's value is
            # unhashable.
            cache_key += tuple(c.cell_contents for c in impl.__closure__)
        cached = self.cached_internal_func.get(cache_key)
    if cached is None:
        cres = self._compile_subroutine_no_cache(builder, impl, sig,
                                                 locals=locals,
                                                 flags=flags)
        self.cached_internal_func[cache_key] = cres
    # Re-fetch so both the fresh-compile and cache-hit paths return the
    # cached entry.
    cres = self.cached_internal_func[cache_key]
    # Allow inlining the function inside callers.
    self.active_code_library.add_linking_library(cres.library)
    return cres
def compile_internal(self, builder, impl, sig, args, locals=None):
    """
    Like compile_subroutine(), but also call the function with the given
    *args*.
    """
    # Fix: fresh dict per call instead of a shared mutable default.
    if locals is None:
        locals = {}
    cres = self.compile_subroutine(builder, impl, sig, locals)
    return self.call_internal(builder, cres.fndesc, sig, args)
def call_internal(self, builder, fndesc, sig, args):
    """
    Given the function descriptor of an internally compiled function,
    emit a call to that function with the given arguments.
    """
    status, res = self.call_internal_no_propagate(builder, fndesc, sig, args)
    # On error, propagate the status to our own caller.
    with cgutils.if_unlikely(builder, status.is_error):
        self.call_conv.return_status_propagate(builder, status)
    res = imputils.fix_returning_optional(self, builder, sig, status, res)
    return res

def call_internal_no_propagate(self, builder, fndesc, sig, args):
    """Similar to `.call_internal()` but does not handle or propagate
    the return status automatically.
    """
    # Add call to the generated function
    llvm_mod = builder.module
    fn = self.declare_function(llvm_mod, fndesc)
    status, res = self.call_conv.call_function(builder, fn, sig.return_type,
                                               sig.args, args)
    return status, res
def call_unresolved(self, builder, name, sig, args):
    """
    Insert a function call to an unresolved symbol with the given *name*.

    Note: this is used for recursive call.

    In the mutual recursion case::

        @njit
        def foo():
            ...  # calls bar()

        @njit
        def bar():
            ...  # calls foo()

        foo()

    When foo() is called, the compilation of bar() is fully completed
    (codegen'ed and loaded) before foo() is. Since MCJIT's eager compilation
    doesn't allow loading modules with declare-only functions (which is
    needed for foo() in bar()), the call_unresolved injects a global
    variable that the "linker" can update even after the module is loaded by
    MCJIT. The linker would allocate space for the global variable before
    the bar() module is loaded. When later foo() module is defined, it will
    update bar()'s reference to foo().

    The legacy lazy JIT and the new ORC JIT would allow a declare-only
    function be used in a module as long as it is defined by the time of its
    first use.
    """
    # Insert an unresolved reference to the function being called.
    codegen = self.codegen()
    fnty = self.call_conv.get_function_type(sig.return_type, sig.args)
    fn = codegen.insert_unresolved_ref(builder, fnty, name)
    # Normal call sequence
    status, res = self.call_conv.call_function(builder, fn, sig.return_type,
                                               sig.args, args)
    with cgutils.if_unlikely(builder, status.is_error):
        self.call_conv.return_status_propagate(builder, status)
    res = imputils.fix_returning_optional(self, builder, sig, status, res)
    return res
def get_executable(self, func, fndesc):
    # Target-specific: subclasses wrap the compiled code into a callable.
    raise NotImplementedError

def get_python_api(self, builder):
    # Accessor object for CPython C-API calls from lowered code.
    return PythonAPI(self, builder)

def sentry_record_alignment(self, rectyp, attr):
    """
    Assumes offset starts from a properly aligned location
    """
    # Only enforced when the target requires strict alignment.
    if self.strict_alignment:
        offset = rectyp.offset(attr)
        elemty = rectyp.typeof(attr)
        align = self.get_abi_alignment(self.get_data_type(elemty))
        if offset % align:
            msg = "{rec}.{attr} of type {type} is not aligned".format(
                rec=rectyp, attr=attr, type=elemty)
            raise TypeError(msg)

def get_helper_class(self, typ, kind='value'):
    """
    Get a helper class for the given *typ*.
    """
    # XXX handle all types: complex, array, etc.
    # XXX should it be a method on the model instead? this would allow a default kind...
    return cgutils.create_struct_proxy(typ, kind)
def _make_helper(self, builder, typ, value=None, ref=None, kind='value'):
    # Instantiate the struct-proxy helper for *typ* with the given kind.
    cls = self.get_helper_class(typ, kind)
    return cls(self, builder, value=value, ref=ref)

def make_helper(self, builder, typ, value=None, ref=None):
    """
    Get a helper object to access the *typ*'s members,
    for the given value or reference.
    """
    return self._make_helper(builder, typ, value, ref, kind='value')

def make_data_helper(self, builder, typ, ref=None):
    """
    As make_helper(), but considers the value as stored in memory,
    rather than a live value.
    """
    return self._make_helper(builder, typ, ref=ref, kind='data')

def make_array(self, typ):
    # Array struct-proxy factory (imported lazily to avoid cycles).
    from numba.np import arrayobj
    return arrayobj.make_array(typ)

def populate_array(self, arr, **kwargs):
    """
    Populate array structure.
    """
    from numba.np import arrayobj
    return arrayobj.populate_array(arr, **kwargs)

def make_complex(self, builder, typ, value=None):
    """
    Get a helper object to access the given complex numbers' members.
    """
    assert isinstance(typ, types.Complex), typ
    return self.make_helper(builder, typ, value)

def make_tuple(self, builder, typ, values):
    """
    Create a tuple of the given *typ* containing the *values*.
    """
    # Start from undef and fill each slot with insert_value.
    tup = self.get_constant_undef(typ)
    for i, val in enumerate(values):
        tup = builder.insert_value(tup, val, i)
    return tup
def make_constant_array(self, builder, typ, ary):
    """
    Create an array structure reifying the given constant array.
    A low-level contiguous array constant is created in the LLVM IR.
    """
    datatype = self.get_data_type(typ.dtype)
    # don't freeze ary of non-contig or bigger than 1MB
    size_limit = 10**6
    if (self.allow_dynamic_globals and
            (typ.layout not in 'FC' or ary.nbytes > size_limit)):
        # get pointer from the ary
        dataptr = ary.ctypes.data
        data = self.add_dynamic_addr(builder, dataptr, info=str(type(dataptr)))
        rt_addr = self.add_dynamic_addr(builder, id(ary), info=str(type(ary)))
    else:
        # Handle data: reify the flattened array in "C" or "F" order as a
        # global array of bytes.
        flat = ary.flatten(order=typ.layout)
        # Note: we use `bytearray(flat.data)` instead of `bytearray(flat)` to
        # workaround issue #1850 which is due to numpy issue #3147
        consts = Constant.array(Type.int(8), bytearray(flat.data))
        data = cgutils.global_constant(builder, ".const.array.data", consts)
        # Ensure correct data alignment (issue #1933)
        data.align = self.get_abi_alignment(datatype)
        # No reference to parent ndarray
        rt_addr = None
    # Handle shape
    llintp = self.get_value_type(types.intp)
    shapevals = [self.get_constant(types.intp, s) for s in ary.shape]
    cshape = Constant.array(llintp, shapevals)
    # Handle strides
    stridevals = [self.get_constant(types.intp, s) for s in ary.strides]
    cstrides = Constant.array(llintp, stridevals)
    # Create array structure
    cary = self.make_array(typ)(self, builder)
    intp_itemsize = self.get_constant(types.intp, ary.dtype.itemsize)
    self.populate_array(cary,
                        data=builder.bitcast(data, cary.data.type),
                        shape=cshape,
                        strides=cstrides,
                        itemsize=intp_itemsize,
                        parent=rt_addr,
                        meminfo=None)
    return cary._getvalue()
def add_dynamic_addr(self, builder, intaddr, info):
    """
    Returns dynamic address as a void pointer `i8*`.

    Internally, a global variable is added to inform the lowerer about
    the usage of dynamic addresses.  Caching will be disabled.
    """
    assert self.allow_dynamic_globals, "dyn globals disabled in this target"
    assert isinstance(intaddr, int), 'dyn addr not of int type'
    mod = builder.module
    llvoidptr = self.get_value_type(types.voidptr)
    addr = self.get_constant(types.uintp, intaddr).inttoptr(llvoidptr)
    # Use a unique name by embedding the address value
    symname = 'numba.dynamic.globals.{:x}'.format(intaddr)
    gv = cgutils.add_global_variable(mod, llvoidptr, symname)
    # Use linkonce linkage to allow merging with other GV of the same name.
    # And, avoid optimization from assuming its value.
    gv.linkage = 'linkonce'
    gv.initializer = addr
    return builder.load(gv)
def get_abi_sizeof(self, ty):
    """
    Get the ABI size of LLVM type *ty*.
    """
    assert isinstance(ty, llvmir.Type), "Expected LLVM type"
    return ty.get_abi_size(self.target_data)

def get_abi_alignment(self, ty):
    """
    Get the ABI alignment of LLVM type *ty*.
    """
    assert isinstance(ty, llvmir.Type), "Expected LLVM type"
    return ty.get_abi_alignment(self.target_data)

def get_preferred_array_alignment(context, ty):
    """
    Get preferred array alignment for Numba type *ty*.
    """
    # NOTE: the first parameter is named *context* rather than *self*
    # (kept as-is for compatibility).
    # AVX prefers 32-byte alignment
    return 32
def post_lowering(self, mod, library):
    """Run target specific post-lowering transformation here.
    """

def create_module(self, name):
    """Create a LLVM module
    The default implementation in BaseContext always raises a
    ``NotImplementedError`` exception. Subclasses should implement
    this method.
    """
    raise NotImplementedError

@property
def active_code_library(self):
    """Get the active code library
    """
    # Top of the code-library stack is the active one.
    return self._codelib_stack[-1]

@contextmanager
def push_code_library(self, lib):
    """Push the active code library for the context
    """
    self._codelib_stack.append(lib)
    try:
        yield
    finally:
        # Always restore the previous library, even on error.
        self._codelib_stack.pop()

def add_linking_libs(self, libs):
    """Add iterable of linking libraries to the *active_code_library*.
    """
    colib = self.active_code_library
    for lib in libs:
        colib.add_linking_library(lib)

def get_ufunc_info(self, ufunc_key):
    """Get the ufunc implementation for a given ufunc object.

    The default implementation in BaseContext always raises a
    ``NotImplementedError`` exception. Subclasses may raise ``KeyError``
    to signal that the given ``ufunc_key`` is not available.

    Parameters
    ----------
    ufunc_key : NumPy ufunc

    Returns
    -------
    res : dict[str, callable]
        A mapping of a NumPy ufunc type signature to a lower-level
        implementation.
    """
    raise NotImplementedError(f"{self} does not support ufunc")
class _wrap_impl(object):
    """
    A wrapper object to call an implementation function with some predefined
    (context, signature) arguments.
    The wrapper also forwards attribute queries, which is important.
    """

    def __init__(self, imp, context, sig):
        # Adapt *imp* so it always accepts a ``loc`` keyword.
        self._callable = _wrap_missing_loc(imp)
        self._imp = self._callable()
        self._context = context
        self._sig = sig

    def __call__(self, builder, args, loc=None):
        res = self._imp(self._context, builder, self._sig, args, loc=loc)
        # Link any libraries advertised on the wrapper itself.
        self._context.add_linking_libs(getattr(self, 'libs', ()))
        return res

    def __getattr__(self, item):
        # Forward unknown attribute lookups to the wrapped implementation.
        return getattr(self._imp, item)

    def __repr__(self):
        return "<wrapped %s>" % repr(self._callable)
def _has_loc(fn):
"""Does function *fn* take ``loc`` argument?
"""
sig = utils.pysignature(fn)
return 'loc' in sig.parameters
class _wrap_missing_loc(object):
    """Factory producing a version of a lowering implementation that
    tolerates a ``loc`` keyword argument even when the underlying
    function does not accept one."""

    def __init__(self, fn):
        self.func = fn  # store this to help with debug

    def __call__(self):
        """Wrap function for missing ``loc`` keyword argument.
        Otherwise, return the original *fn*.
        """
        fn = self.func
        if not _has_loc(fn):
            def wrapper(*args, **kwargs):
                # Fix: use a default so a call without ``loc`` does not
                # raise KeyError (previously kwargs.pop('loc')).
                kwargs.pop('loc', None)  # drop unused loc
                return fn(*args, **kwargs)

            # Copy the following attributes from the wrapped.
            # Following similar implementation as functools.wraps but
            # ignore attributes if not available (i.e fix py2.7)
            attrs = '__name__', 'libs'
            for attr in attrs:
                try:
                    val = getattr(fn, attr)
                except AttributeError:
                    pass
                else:
                    setattr(wrapper, attr, val)
            return wrapper
        else:
            return fn

    def __repr__(self):
        return "<wrapped %s>" % self.func
@utils.runonce
def _initialize_llvm_lock_event():
    """Initial event triggers for LLVM lock
    """
    # Emit start/end events around every acquisition of the llvmlite
    # global lock; registered at most once thanks to @utils.runonce.
    def enter_fn():
        event.start_event("numba:llvm_lock")

    def exit_fn():
        event.end_event("numba:llvm_lock")

    ll.ffi.register_lock_callback(enter_fn, exit_fn)


_initialize_llvm_lock_event()
| |
import datetime
from xml.dom import minidom
from django.contrib.sites.models import Site
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils import timezone
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Article, Entry
TZ = timezone.get_default_timezone()
class FeedTestCase(TestCase):
    """Shared fixtures and XML-DOM assertion helpers for the
    syndication-framework tests."""

    @classmethod
    def setUpTestData(cls):
        # Five entries with fixed datetimes (e4's title exercises XML
        # escaping) and one article linked to the first entry.
        cls.e1 = Entry.objects.create(
            title='My first entry', updated=datetime.datetime(1980, 1, 1, 12, 30),
            published=datetime.datetime(1986, 9, 25, 20, 15, 00)
        )
        cls.e2 = Entry.objects.create(
            title='My second entry', updated=datetime.datetime(2008, 1, 2, 12, 30),
            published=datetime.datetime(2006, 3, 17, 18, 0)
        )
        cls.e3 = Entry.objects.create(
            title='My third entry', updated=datetime.datetime(2008, 1, 2, 13, 30),
            published=datetime.datetime(2005, 6, 14, 10, 45)
        )
        cls.e4 = Entry.objects.create(
            title='A & B < C > D', updated=datetime.datetime(2008, 1, 3, 13, 30),
            published=datetime.datetime(2005, 11, 25, 12, 11, 23)
        )
        cls.e5 = Entry.objects.create(
            title='My last entry', updated=datetime.datetime(2013, 1, 20, 0, 0),
            published=datetime.datetime(2013, 3, 25, 20, 0)
        )
        cls.a1 = Article.objects.create(title='My first article', entry=cls.e1)

    def assertChildNodes(self, elem, expected):
        # The set of child node names of *elem* must equal *expected*.
        actual = {n.nodeName for n in elem.childNodes}
        expected = set(expected)
        self.assertEqual(actual, expected)

    def assertChildNodeContent(self, elem, expected):
        # For each tag -> text pair, the first matching descendant's text
        # content must equal the expected value.
        for k, v in expected.items():
            self.assertEqual(
                elem.getElementsByTagName(k)[0].firstChild.wholeText, v)

    def assertCategories(self, elem, expected):
        # Compare the set of <category> child texts to *expected*.
        self.assertEqual(
            {i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'},
            set(expected)
        )
@override_settings(ROOT_URLCONF='syndication_tests.urls')
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
@classmethod
def setUpClass(cls):
    super().setUpClass()
    # This cleanup is necessary because contrib.sites cache
    # makes tests interfere with each other, see #11505
    Site.objects.clear_cache()
def test_rss2_feed(self):
    """
    Test the structure and content of feeds generated by Rss201rev2Feed.
    """
    response = self.client.get('/syndication/rss2/')
    doc = minidom.parseString(response.content)

    # Making sure there's only 1 `rss` element and that the correct
    # RSS version was specified.
    feed_elem = doc.getElementsByTagName('rss')
    self.assertEqual(len(feed_elem), 1)
    feed = feed_elem[0]
    self.assertEqual(feed.getAttribute('version'), '2.0')

    # Making sure there's only one `channel` element w/in the
    # `rss` element.
    chan_elem = feed.getElementsByTagName('channel')
    self.assertEqual(len(chan_elem), 1)
    chan = chan_elem[0]

    # Find the last build date
    d = Entry.objects.latest('published').published
    last_build_date = rfc2822_date(timezone.make_aware(d, TZ))

    self.assertChildNodes(
        chan, [
            'title', 'link', 'description', 'language', 'lastBuildDate',
            'item', 'atom:link', 'ttl', 'copyright', 'category',
        ]
    )
    self.assertChildNodeContent(chan, {
        'title': 'My blog',
        'description': 'A more thorough description of my blog.',
        'link': 'http://example.com/blog/',
        'language': 'en',
        'lastBuildDate': last_build_date,
        'ttl': '600',
        'copyright': 'Copyright (c) 2007, Sally Smith',
    })
    self.assertCategories(chan, ['python', 'django'])

    # Ensure the content of the channel is correct
    self.assertChildNodeContent(chan, {
        'title': 'My blog',
        'link': 'http://example.com/blog/',
    })

    # Check feed_url is passed
    self.assertEqual(
        chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
        'http://example.com/syndication/rss2/'
    )

    # Find the pubdate of the first feed item
    d = Entry.objects.get(pk=1).published
    pub_date = rfc2822_date(timezone.make_aware(d, TZ))

    items = chan.getElementsByTagName('item')
    self.assertEqual(len(items), Entry.objects.count())
    self.assertChildNodeContent(items[0], {
        'title': 'My first entry',
        'description': 'Overridden description: My first entry',
        'link': 'http://example.com/blog/1/',
        'guid': 'http://example.com/blog/1/',
        'pubDate': pub_date,
        'author': 'test@example.com (Sally Smith)',
    })
    self.assertCategories(items[0], ['python', 'testing'])
    for item in items:
        self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
        # Assert that <guid> does not have any 'isPermaLink' attribute
        self.assertIsNone(item.getElementsByTagName(
            'guid')[0].attributes.get('isPermaLink'))
def test_rss2_feed_guid_permalink_false(self):
    """
    Test if the 'isPermaLink' attribute of <guid> element of an item
    in the RSS feed is 'false'.
    """
    response = self.client.get(
        '/syndication/rss2/guid_ispermalink_false/')
    doc = minidom.parseString(response.content)
    chan = doc.getElementsByTagName(
        'rss')[0].getElementsByTagName('channel')[0]
    items = chan.getElementsByTagName('item')
    # Every item's <guid> must explicitly carry isPermaLink="false".
    for item in items:
        self.assertEqual(
            item.getElementsByTagName('guid')[0].attributes.get(
                'isPermaLink').value, "false")
def test_rss2_feed_guid_permalink_true(self):
    """
    Test if the 'isPermaLink' attribute of <guid> element of an item
    in the RSS feed is 'true'.
    """
    response = self.client.get(
        '/syndication/rss2/guid_ispermalink_true/')
    doc = minidom.parseString(response.content)
    chan = doc.getElementsByTagName(
        'rss')[0].getElementsByTagName('channel')[0]
    items = chan.getElementsByTagName('item')
    # Every item's <guid> must explicitly carry isPermaLink="true".
    for item in items:
        self.assertEqual(
            item.getElementsByTagName('guid')[0].attributes.get(
                'isPermaLink').value, "true")
def test_rss2_single_enclosure(self):
    """Each item in the single-enclosure feed carries exactly one
    <enclosure> element."""
    res = self.client.get('/syndication/rss2/single-enclosure/')
    document = minidom.parseString(res.content)
    rss_root = document.getElementsByTagName('rss')[0]
    channel = rss_root.getElementsByTagName('channel')[0]
    for entry in channel.getElementsByTagName('item'):
        enclosure_count = len(entry.getElementsByTagName('enclosure'))
        self.assertEqual(enclosure_count, 1)
def test_rss2_multiple_enclosures(self):
    # RSS 2.0 forbids multiple enclosures per item; rendering the feed
    # must raise with this exact message.
    with self.assertRaisesMessage(
        ValueError,
        "RSS feed items may only have one enclosure, see "
        "http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
    ):
        self.client.get('/syndication/rss2/multiple-enclosure/')
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(
chan, [
'title', 'link', 'description', 'language', 'lastBuildDate',
'item', 'atom:link', 'ttl', 'copyright', 'category',
]
)
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(
feed,
['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author']
)
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'category',
'updated',
'published',
'rights',
'author',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_atom_feed_published_and_updated_elements(self):
"""
The published and updated elements are not
the same and now adhere to RFC 4287.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
entries = feed.getElementsByTagName('entry')
published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText
updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText
self.assertNotEqual(published, updated)
def test_atom_single_enclosure(self):
response = self.client.get('/syndication/atom/single-enclosure/')
feed = minidom.parseString(response.content).firstChild
items = feed.getElementsByTagName('entry')
for item in items:
links = item.getElementsByTagName('link')
links = [link for link in links if link.getAttribute('rel') == 'enclosure']
self.assertEqual(len(links), 1)
def test_atom_multiple_enclosures(self):
response = self.client.get('/syndication/atom/multiple-enclosure/')
feed = minidom.parseString(response.content).firstChild
items = feed.getElementsByTagName('entry')
for item in items:
links = item.getElementsByTagName('link')
links = [link for link in links if link.getAttribute('rel') == 'enclosure']
self.assertEqual(len(links), 2)
def test_latest_post_date(self):
"""
Both the published and updated dates are
considered when determining the latest post date.
"""
# this feed has a `published` element with the latest date
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
latest_published = rfc3339_date(timezone.make_aware(d, TZ))
self.assertEqual(updated, latest_published)
# this feed has an `updated` element with the latest date
response = self.client.get('/syndication/latest/')
feed = minidom.parseString(response.content).firstChild
updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.exclude(pk=5).latest('updated').updated
latest_updated = rfc3339_date(timezone.make_aware(d, TZ))
self.assertEqual(updated, latest_updated)
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(
feed,
['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author']
)
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, [
'title',
'link',
'id',
'summary',
'ministry',
'rights',
'author',
'updated',
'published',
'category',
])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Datetimes are correctly converted to the local time zone.
"""
# Naive date times passed in get converted to the local time zone, so
# check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('published').published
latest = rfc3339_date(timezone.make_aware(d, TZ))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
published = doc.getElementsByTagName('published')[0].firstChild.wholeText
self.assertEqual(published[-6:], '+00:42')
@requires_tz_support
def test_feed_last_modified_time_naive_date(self):
"""
Tests the Last-Modified header with naive publication dates.
"""
response = self.client.get('/syndication/naive-dates/')
self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')
def test_feed_last_modified_time(self):
"""
Tests the Last-Modified header with aware publication dates.
"""
response = self.client.get('/syndication/aware-dates/')
self.assertEqual(response['Last-Modified'], 'Mon, 25 Mar 2013 19:18:00 GMT')
# No last-modified when feed has no item_pubdate
response = self.client.get('/syndication/no_pubdate/')
self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
"""
The feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
An ImproperlyConfigured is raised if no link could be found for the
item(s).
"""
msg = (
'Give your Article class a get_absolute_url() method, or define '
'an item_link() method in your Feed class.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get('/syndication/articles/')
def test_template_feed(self):
"""
The item title and description can be overridden with templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry\n',
'description': 'Description in your templates: My first entry\n',
'link': 'http://example.com/blog/1/',
})
def test_template_context_feed(self):
"""
Custom context data can be passed to templates for title
and description.
"""
response = self.client.get('/syndication/template_context/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'My first entry (foo is bar)\n',
'description': 'My first entry (foo is bar)\n',
})
def test_add_domain(self):
"""
add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
'mailto:uhoh@djangoproject.com'
)
self.assertEqual(
views.add_domain('example.com', '//example.com/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
| |
# Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def extract_to_next_token(s, match, invert = False):
    """Split `s` at the first character that ends the current token.

    With invert=False the token is the leading run of characters drawn
    from `match`; with invert=True it is the leading run of characters
    NOT in `match`.  Returns a (token, remainder) pair; either part may
    be the empty string.
    """
    boundary = len(s)
    for idx, ch in enumerate(s):
        # Stop at the first character whose membership in `match`
        # equals the invert flag (i.e. a character outside the token).
        if (ch in match) == invert:
            boundary = idx
            break
    return (s[:boundary], s[boundary:])
class UpdateLookupOpts(object):
    """Options parsed out of (or serialized back into) the modifier part
    of an rtpproxy update/lookup command."""
    destination_ip = None
    local_ip = None
    codecs = None
    otherparams = None
    remote_ip = None
    remote_port = None
    from_tag = None
    to_tag = None
    notify_socket = None
    notify_tag = None

    def __init__(self, s = None, *params):
        """Parse the modifier string `s`; when s is None, take
        (destination_ip, local_ip, codecs, otherparams) directly from
        *params instead."""
        if s == None:
            self.destination_ip, self.local_ip, self.codecs, self.otherparams = params
            return
        self.otherparams = ''
        while len(s) > 0:
            if s[0] == 'R':
                # R<ip>: pre-set destination address.
                val, s = extract_to_next_token(s[1:], ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '.'))
                val = val.strip()
                if len(val) > 0:
                    self.destination_ip = val
            # Bug fix: this branch was a bare `if`, which re-examined the
            # already-advanced `s` within the same iteration and raised
            # IndexError when the 'R' value consumed the rest of the
            # string.  As `elif` the next character is handled by the
            # next loop iteration with identical end results.
            elif s[0] == 'L':
                # L<ip>: local address to bind to.
                val, s = extract_to_next_token(s[1:], ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '.'))
                val = val.strip()
                if len(val) > 0:
                    self.local_ip = val
            elif s[0] == 'c':
                # c<n,n,...>: comma-separated allowed codec numbers.
                val, s = extract_to_next_token(s[1:], ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0', ','))
                val = val.strip()
                if len(val) > 0:
                    self.codecs = [int(x) for x in val.split(',')]
            else:
                # Anything else up to the next recognized modifier is
                # preserved verbatim in otherparams.
                val, s = extract_to_next_token(s, ('c', 'R'), True)
                if len(val) > 0:
                    self.otherparams += val

    def getstr(self, call_id, swaptags = False):
        """Serialize back into rtpproxy command argument form.

        When swaptags is True the from/to tags are emitted in reverse
        order (to_tag must be set in that case, otherwise raises).
        """
        s = ''
        if self.destination_ip != None:
            s += 'R%s' % (self.destination_ip,)
        if self.local_ip != None:
            s += 'L%s' % (self.local_ip,)
        if self.codecs != None:
            s += 'c'
            for codec in self.codecs:
                s += '%s,' % (codec,)
            # Drop the trailing comma added by the loop above.
            s = s[:-1]
        if self.otherparams != None and len(self.otherparams) > 0:
            s += self.otherparams
        s = '%s %s' % (s, call_id)
        if self.remote_ip != None:
            s = '%s %s' % (s, self.remote_ip)
        if self.remote_port != None:
            s = '%s %s' % (s, self.remote_port)
        if not swaptags:
            from_tag, to_tag = (self.from_tag, self.to_tag)
        else:
            if self.to_tag == None:
                raise Exception('UpdateLookupOpts::getstr(swaptags = True): to_tag is not set')
            to_tag, from_tag = (self.from_tag, self.to_tag)
        # Bug fix: the swapped locals above were computed but the original
        # code emitted self.from_tag/self.to_tag below, so swaptags had
        # no effect.  Emit the locals instead.
        if from_tag != None:
            s = '%s %s' % (s, from_tag)
        if to_tag != None:
            s = '%s %s' % (s, to_tag)
        if self.notify_socket != None:
            s = '%s %s' % (s, self.notify_socket)
        if self.notify_tag != None:
            s = '%s %s' % (s, self.notify_tag)
        return s
class Rtp_proxy_cmd(object):
    """Parsed representation of one rtpproxy protocol command string.

    The first character of `cmd` selects the command type; update/lookup
    commands ('U'/'L') additionally carry an UpdateLookupOpts payload.
    """
    # One-letter command type, upper-cased ('U', 'L', 'G', ...).
    type = None
    # UpdateLookupOpts instance for 'U'/'L' commands, otherwise None.
    ul_opts = None
    # Modifier characters that immediately followed the command letter.
    command_opts = None
    # Call-ID the command applies to (session commands only).
    call_id = None
    # Remaining unparsed argument string, if any.
    args = None
    # Retransmission count; not set by this class itself.
    nretr = None
    def __init__(self, cmd):
        self.type = cmd[0].upper()
        if self.type in ('U', 'L', 'D', 'P', 'S', 'R', 'C', 'Q'):
            # Session commands look like "<opts> <call_id> <rest...>".
            command_opts, self.call_id, args = cmd.split(None, 2)
            if self.type in ('U', 'L'):
                # update/lookup: <rest> is "<ip> <port> <from_tag>
                # [to_tag | notify_socket notify_tag |
                #  to_tag notify_socket notify_tag]".
                self.ul_opts = UpdateLookupOpts(command_opts[1:])
                self.ul_opts.remote_ip, self.ul_opts.remote_port, args = args.split(None, 2)
                args = args.split(None, 1)
                self.ul_opts.from_tag = args[0]
                if len(args) > 1:
                    args = args[1].split(None, 2)
                    if len(args) == 1:
                        self.ul_opts.to_tag = args[0]
                    elif len(args) == 2:
                        # NOTE(review): exactly two trailing tokens are
                        # treated as notify_socket/notify_tag rather than
                        # to_tag/notify_socket — confirm against the
                        # rtpproxy protocol documentation.
                        self.ul_opts.notify_socket, self.ul_opts.notify_tag = args
                    else:
                        self.ul_opts.to_tag, self.ul_opts.notify_socket, self.ul_opts.notify_tag = args
            else:
                self.args = args
                self.command_opts = command_opts[1:]
        elif self.type in ('G',):
            # Stats query: modifiers (if any) are glued directly onto 'G'.
            if not cmd[1].isspace():
                cparts = cmd[1:].split(None, 1)
                if len(cparts) > 1:
                    self.command_opts, self.args = cparts
                else:
                    self.command_opts = cparts[0]
            else:
                self.args = cmd[1:].strip()
        else:
            # Global (non-session) commands keep everything after the
            # command letter as raw options.
            self.command_opts = cmd[1:]
    def __str__(self):
        # Reassemble an equivalent command string from the parsed fields.
        s = self.type
        if self.ul_opts != None:
            s += self.ul_opts.getstr(self.call_id)
        else:
            if self.command_opts != None:
                s += self.command_opts
            if self.call_id != None:
                s = '%s %s' % (s, self.call_id)
        if self.args != None:
            s = '%s %s' % (s, self.args)
        return s
class Rtpp_stats(object):
    """Accumulator for rtpproxy runtime statistics.

    Every counter named in `snames` becomes an attribute (optionally
    prefixed with `spookyprefix`); all counters are ints except
    'total_duration', which is a float.
    """
    # Prefix prepended to every counter attribute name.
    spookyprefix = ''
    # When True, __str__ renders "name=value" pairs instead of bare values.
    verbose = False

    def __init__(self, snames):
        """Create a zero-valued counter for each name in snames."""
        type_list = []
        for name in snames:
            ctype = float if name == 'total_duration' else int
            setattr(self, self.spookyprefix + name, ctype())
            type_list.append(ctype)
        self.all_names = tuple(snames)
        self.all_types = tuple(type_list)

    def __iadd__(self, other):
        """Accumulate another Rtpp_stats into this one, element-wise."""
        for name in self.all_names:
            attr = self.spookyprefix + name
            setattr(self, attr, getattr(self, attr) + getattr(other, attr))
        return self

    def parseAndAdd(self, rstr):
        """Parse a whitespace-separated stats line and add each field in."""
        fields = rstr.split(None, len(self.all_names) - 1)
        for name, ctype, raw in zip(self.all_names, self.all_types, fields):
            attr = self.spookyprefix + name
            setattr(self, attr, getattr(self, attr) + ctype(raw))

    def __str__(self):
        """Render counters space-separated, optionally with name= labels."""
        parts = []
        for name in self.all_names:
            value = getattr(self, self.spookyprefix + name)
            if self.verbose:
                parts.append('%s=%s' % (name, str(value)))
            else:
                parts.append(str(value))
        return ' '.join(parts)
if __name__ == '__main__':
    # Smoke test: parse a stats query with and without modifier flags.
    # Bug fix: the original used Python-2-only `print` statements, which
    # are a SyntaxError under Python 3.  Single-argument print(...) is
    # valid in both Python 2 and Python 3.
    rc = Rtp_proxy_cmd('G nsess_created total_duration')
    print(rc)
    print(rc.args)
    print(rc.command_opts)
    rc = Rtp_proxy_cmd('Gv nsess_created total_duration')
    print(rc)
    print(rc.args)
    print(rc.command_opts)
| |
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common import keystone
from ironic.common import pxe_utils
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules import agent
from ironic import objects
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as object_utils
# Canned instance/driver info fixtures shared by all test classes below.
INSTANCE_INFO = db_utils.get_test_agent_instance_info()
DRIVER_INFO = db_utils.get_test_agent_driver_info()
# Global oslo.config handle; tests tweak options via self.config(...).
CONF = cfg.CONF
class TestAgentMethods(db_base.DbTestCase):
    """Unit tests for module-level helpers in the agent driver module."""

    def setUp(self):
        super(TestAgentMethods, self).setUp()
        self.node = object_utils.create_test_node(self.context,
                                                  driver='fake_agent')

    def test_build_agent_options_conf(self):
        """The API URL comes from conductor config when it is set."""
        self.config(api_url='api-url', group='conductor')
        agent_opts = agent.build_agent_options(self.node)
        self.assertEqual('api-url', agent_opts['ipa-api-url'])
        self.assertEqual('fake_agent', agent_opts['ipa-driver-name'])

    @mock.patch.object(keystone, 'get_service_url')
    def test_build_agent_options_keystone(self, mock_get_url):
        """Keystone supplies the API URL when config does not set one."""
        self.config(api_url=None, group='conductor')
        mock_get_url.return_value = 'api-url'
        agent_opts = agent.build_agent_options(self.node)
        self.assertEqual('api-url', agent_opts['ipa-api-url'])
        self.assertEqual('fake_agent', agent_opts['ipa-driver-name'])
class TestAgentDeploy(db_base.DbTestCase):
    """Unit tests for the AgentDeploy deploy interface."""

    def setUp(self):
        super(TestAgentDeploy, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake_agent')
        self.driver = agent.AgentDeploy()
        node_fields = {
            'driver': 'fake_agent',
            'instance_info': INSTANCE_INFO,
            'driver_info': DRIVER_INFO
        }
        self.node = object_utils.create_test_node(self.context, **node_fields)

    def test_validate(self):
        """validate() succeeds for a node with complete driver info."""
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            self.driver.validate(task)

    def test_validate_exception(self):
        """validate() rejects a node whose driver_info is empty."""
        self.node.driver_info = {}
        self.node.save()
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.driver.validate, task)

    @mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
    @mock.patch('ironic.conductor.utils.node_set_boot_device')
    @mock.patch('ironic.conductor.utils.node_power_action')
    def test_deploy(self, power_mock, bootdev_mock, dhcp_mock):
        """deploy() programs DHCP, selects PXE boot, reboots, and waits."""
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            dhcp_opts = pxe_utils.dhcp_options_for_instance(task)
            deploy_state = self.driver.deploy(task)
            self.assertEqual(deploy_state, states.DEPLOYWAIT)
            dhcp_mock.assert_called_once_with(task, dhcp_opts)
            bootdev_mock.assert_called_once_with(task, 'pxe', persistent=True)
            power_mock.assert_called_once_with(task, states.REBOOT)

    @mock.patch('ironic.conductor.utils.node_power_action')
    def test_tear_down(self, power_mock):
        """tear_down() powers the node off and reports DELETED."""
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=False) as task:
            teardown_state = self.driver.tear_down(task)
            power_mock.assert_called_once_with(task, states.POWER_OFF)
            self.assertEqual(teardown_state, states.DELETED)

    def test_prepare(self):
        # AgentDeploy.prepare is a no-op; nothing to verify.
        pass

    def test_clean_up(self):
        # AgentDeploy.clean_up is a no-op; nothing to verify.
        pass

    @mock.patch.object(dhcp_factory.DHCPFactory, 'update_dhcp')
    def test_take_over(self, update_dhcp_mock):
        """take_over() refreshes DHCP with the agent PXE bootfile option."""
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            task.driver.deploy.take_over(task)
            update_dhcp_mock.assert_called_once_with(
                task, CONF.agent.agent_pxe_bootfile_name)
class TestAgentVendor(db_base.DbTestCase):
    """Unit tests for the agent driver's vendor-passthru interface."""

    def setUp(self):
        super(TestAgentVendor, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_pxe")
        self.passthru = agent.AgentVendorInterface()
        n = {
            'driver': 'fake_pxe',
            'instance_info': INSTANCE_INFO,
            'driver_info': DRIVER_INFO
        }
        self.node = object_utils.create_test_node(self.context, **n)

    def test_validate(self):
        """validate() accepts a node with complete agent driver info."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.passthru.validate(task)

    @mock.patch('ironic.common.image_service.Service')
    def test_continue_deploy(self, image_service_mock):
        """_continue_deploy() sends the image manifest to the agent."""
        test_temp_url = 'http://image'
        expected_image_info = {
            'urls': [test_temp_url],
            'id': 'fake-image',
            'checksum': 'checksum'
        }
        client_mock = mock.Mock()
        glance_mock = mock.Mock()
        glance_mock.show.return_value = {}
        glance_mock.swift_temp_url.return_value = test_temp_url
        image_service_mock.return_value = glance_mock
        self.passthru._client = client_mock
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.passthru._continue_deploy(task)
            client_mock.prepare_image.assert_called_with(task.node,
                                                         expected_image_info)
            self.assertEqual(task.node.provision_state, states.DEPLOYING)

    def test_lookup_version_not_found(self):
        """_lookup() rejects an unsupported API version."""
        kwargs = {
            'version': '999',
        }
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.passthru._lookup,
                              task.context,
                              **kwargs)

    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._find_node_by_macs')
    def test_lookup_v2(self, find_mock):
        """v2 _lookup() resolves the node from the reported interfaces."""
        kwargs = {
            'version': '2',
            'inventory': {
                'interfaces': [
                    {
                        'mac_address': 'aa:bb:cc:dd:ee:ff',
                        'name': 'eth0'
                    },
                    {
                        'mac_address': 'ff:ee:dd:cc:bb:aa',
                        'name': 'eth1'
                    }
                ]
            }
        }
        find_mock.return_value = self.node
        with task_manager.acquire(self.context, self.node.uuid) as task:
            node = self.passthru._lookup(task.context, **kwargs)
        self.assertEqual(self.node, node['node'])

    def test_lookup_v2_missing_inventory(self):
        """v2 _lookup() requires an inventory argument."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.passthru._lookup,
                              task.context)

    def test_lookup_v2_empty_inventory(self):
        """v2 _lookup() rejects an empty inventory."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.passthru._lookup,
                              task.context,
                              inventory={})

    def test_lookup_v2_empty_interfaces(self):
        """v2 _lookup() fails node resolution without any interfaces."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.NodeNotFound,
                              self.passthru._lookup,
                              task.context,
                              version='2',
                              inventory={'interfaces': []})

    @mock.patch.object(objects.Port, 'get_by_address')
    def test_find_ports_by_macs(self, mock_get_port):
        """_find_ports_by_macs() returns the port matching each MAC."""
        fake_port = object_utils.get_test_port(self.context)
        mock_get_port.return_value = fake_port
        macs = ['aa:bb:cc:dd:ee:ff']
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            ports = self.passthru._find_ports_by_macs(task, macs)
        self.assertEqual(1, len(ports))
        self.assertEqual(fake_port.uuid, ports[0].uuid)
        self.assertEqual(fake_port.node_id, ports[0].node_id)

    @mock.patch.object(objects.Port, 'get_by_address')
    def test_find_ports_by_macs_bad_params(self, mock_get_port):
        """Unknown MACs produce an empty port list, not an error."""
        mock_get_port.side_effect = exception.PortNotFound(port="123")
        macs = ['aa:bb:cc:dd:ee:ff']
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            empty_ids = self.passthru._find_ports_by_macs(task, macs)
        self.assertEqual([], empty_ids)

    @mock.patch('ironic.objects.node.Node.get_by_id')
    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._get_node_id')
    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._find_ports_by_macs')
    def test_find_node_by_macs(self, ports_mock, node_id_mock, node_mock):
        """_find_node_by_macs() chains ports -> node id -> node object."""
        ports_mock.return_value = object_utils.get_test_port(self.context)
        node_id_mock.return_value = '1'
        node_mock.return_value = self.node
        macs = ['aa:bb:cc:dd:ee:ff']
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            node = self.passthru._find_node_by_macs(task, macs)
        # Bug fix: the original asserted `assertEqual(node, node)`, which
        # is vacuously true.  Compare the lookup result with the node the
        # mock was configured to return.
        self.assertEqual(self.node, node)

    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._find_ports_by_macs')
    def test_find_node_by_macs_no_ports(self, ports_mock):
        """With no matching ports, the node lookup raises NodeNotFound."""
        ports_mock.return_value = []
        macs = ['aa:bb:cc:dd:ee:ff']
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            self.assertRaises(exception.NodeNotFound,
                              self.passthru._find_node_by_macs,
                              task,
                              macs)

    @mock.patch('ironic.objects.node.Node.get_by_uuid')
    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._get_node_id')
    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._find_ports_by_macs')
    def test_find_node_by_macs_nodenotfound(self, ports_mock, node_id_mock,
                                            node_mock):
        """A NodeNotFound raised during the lookup is propagated."""
        port = object_utils.get_test_port(self.context)
        ports_mock.return_value = [port]
        node_id_mock.return_value = self.node['uuid']
        # First call succeeds, second raises.
        node_mock.side_effect = [self.node,
                                 exception.NodeNotFound(node=self.node)]
        macs = ['aa:bb:cc:dd:ee:ff']
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            self.assertRaises(exception.NodeNotFound,
                              self.passthru._find_node_by_macs,
                              task,
                              macs)

    def test_get_node_id(self):
        """_get_node_id() returns the node id shared by all ports."""
        fake_port1 = object_utils.get_test_port(self.context,
                                                node_id=123,
                                                address="aa:bb:cc:dd:ee:fe")
        fake_port2 = object_utils.get_test_port(self.context,
                                                node_id=123,
                                                id=42,
                                                address="aa:bb:cc:dd:ee:fb",
                                                uuid='1be26c0b-03f2-4d2e-ae87-'
                                                     'c02d7f33c782')
        node_id = self.passthru._get_node_id([fake_port1, fake_port2])
        self.assertEqual(fake_port2.node_id, node_id)

    def test_get_node_id_exception(self):
        """Ports belonging to different nodes raise NodeNotFound."""
        fake_port1 = object_utils.get_test_port(self.context,
                                                node_id=123,
                                                address="aa:bb:cc:dd:ee:fc")
        fake_port2 = object_utils.get_test_port(self.context,
                                                node_id=321,
                                                id=42,
                                                address="aa:bb:cc:dd:ee:fd",
                                                uuid='1be26c0b-03f2-4d2e-ae87-'
                                                     'c02d7f33c782')
        self.assertRaises(exception.NodeNotFound,
                          self.passthru._get_node_id,
                          [fake_port1, fake_port2])

    def test_heartbeat(self):
        """_heartbeat() accepts a payload carrying agent_url."""
        kwargs = {
            'agent_url': 'http://127.0.0.1:9999/bar'
        }
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            self.passthru._heartbeat(task, **kwargs)

    def test_heartbeat_bad(self):
        """_heartbeat() without agent_url raises MissingParameterValue."""
        kwargs = {}
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            self.assertRaises(exception.MissingParameterValue,
                              self.passthru._heartbeat, task, **kwargs)

    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._heartbeat')
    def test_vendor_passthru_heartbeat(self, mock_heartbeat):
        """vendor_passthru() dispatches to the registered heartbeat route."""
        kwargs = {
            'method': 'heartbeat',
        }
        self.passthru.vendor_routes['heartbeat'] = mock_heartbeat
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            self.passthru.vendor_passthru(task, **kwargs)
        mock_heartbeat.assert_called_once_with(task, **kwargs)

    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._heartbeat')
    def test_vendor_passthru_heartbeat_ironic_exc(self, mock_heartbeat):
        """Ironic exceptions from a route are re-raised unchanged."""
        mock_heartbeat.side_effect = exception.IronicException()
        kwargs = {
            'method': 'heartbeat',
        }
        self.passthru.vendor_routes['heartbeat'] = mock_heartbeat
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            self.assertRaises(exception.IronicException,
                              self.passthru.vendor_passthru, task, **kwargs)
        mock_heartbeat.assert_called_once_with(task, **kwargs)

    @mock.patch('ironic.drivers.modules.agent.AgentVendorInterface'
                '._heartbeat')
    def test_vendor_passthru_heartbeat_exception(self, mock_heartbeat):
        """Unexpected errors are wrapped in VendorPassthruException."""
        mock_heartbeat.side_effect = KeyError()
        kwargs = {
            'method': 'heartbeat',
        }
        self.passthru.vendor_routes['heartbeat'] = mock_heartbeat
        with task_manager.acquire(
                self.context, self.node['uuid'], shared=True) as task:
            self.assertRaises(exception.VendorPassthruException,
                              self.passthru.vendor_passthru, task, **kwargs)
        mock_heartbeat.assert_called_once_with(task, **kwargs)
| |
import os
import shutil
from unittest import mock
import pytest
from click.testing import CliRunner
from great_expectations import DataContext
from great_expectations.cli.v012 import cli
from great_expectations.data_context.templates import CONFIG_VARIABLES_TEMPLATE
from great_expectations.data_context.util import file_relative_path
from great_expectations.util import gen_directory_tree_str
from tests.cli.v012.test_cli import yaml
from tests.cli.v012.utils import assert_no_logging_messages_or_tracebacks
from tests.test_utils import set_directory
@pytest.mark.filterwarnings(
    "ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_yes_to_fixing_them(
    mock_webbrowser,
    caplog,
    tmp_path_factory,
):
    """
    This test walks through the onboarding experience.
    The user just checked an existing project out of source control and does
    not yet have an uncommitted directory.
    """
    # Stage a project directory containing a copy of the Titanic fixture.
    root_dir = tmp_path_factory.mktemp("hiya")
    root_dir = str(root_dir)
    os.makedirs(os.path.join(root_dir, "data"))
    data_folder_path = os.path.join(root_dir, "data")
    data_path = os.path.join(root_dir, "data", "Titanic.csv")
    fixture_path = file_relative_path(
        __file__, os.path.join("..", "..", "test_sets", "Titanic.csv")
    )
    shutil.copy(fixture_path, data_path)
    # Create a new project from scratch that we will use for the test in the next step
    # NOTE(review): the input string answers the interactive init prompts;
    # its exact sequence must match the CLI's question order — confirm if
    # the prompts change.
    runner = CliRunner(mix_stderr=False)
    result = runner.invoke(
        cli,
        ["init", "-d", root_dir],
        input=f"\n\n1\n1\n{data_folder_path}\n\n\n\n2\n{data_path}\n\n\n\n",
        catch_exceptions=False,
    )
    stdout = result.output
    assert result.exit_code == 0
    # Data Docs were built and opened in the (mocked) browser exactly once.
    assert mock_webbrowser.call_count == 1
    assert (
        "{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format(
            root_dir
        )
        in mock_webbrowser.call_args[0][0]
    )
    assert "Great Expectations is now set up." in stdout
    # Simulate a fresh checkout: remove the uncommitted directory entirely.
    context = DataContext(os.path.join(root_dir, DataContext.GE_DIR))
    uncommitted_dir = os.path.join(context.root_directory, "uncommitted")
    shutil.rmtree(uncommitted_dir)
    assert not os.path.isdir(uncommitted_dir)
    # Test the second invocation of init
    runner = CliRunner(mix_stderr=False)
    with pytest.warns(
        UserWarning, match="Warning. An existing `great_expectations.yml` was found"
    ):
        result = runner.invoke(
            cli, ["init", "-d", root_dir], input="Y\nn\n", catch_exceptions=False
        )
    stdout = result.stdout
    assert result.exit_code == 0
    # init should have repaired the missing uncommitted scaffolding.
    assert "Great Expectations added some missing files required to run." in stdout
    assert "You may see new files in" in stdout
    assert "OK. You must run" not in stdout
    assert "great_expectations init" not in stdout
    assert "to fix the missing files!" not in stdout
    assert "Would you like to build & view this project's Data Docs!?" in stdout
    assert os.path.isdir(uncommitted_dir)
    config_var_path = os.path.join(uncommitted_dir, "config_variables.yml")
    assert os.path.isfile(config_var_path)
    with open(config_var_path) as f:
        # The regenerated config_variables.yml matches the packaged template.
        assert f.read() == CONFIG_VARIABLES_TEMPLATE
    assert_no_logging_messages_or_tracebacks(caplog, result)
@pytest.mark.filterwarnings(
    "ignore:DataAsset.remove_expectations*:DeprecationWarning:great_expectations.data_asset"
)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_on_complete_existing_project_all_uncommitted_dirs_exist(
    mock_webbrowser,
    caplog,
    tmp_path_factory,
):
    """
    This test walks through the onboarding experience.
    The user just checked an existing project out of source control and does
    not yet have an uncommitted directory.
    """
    # Stage a project directory containing a copy of the Titanic fixture.
    root_dir = tmp_path_factory.mktemp("hiya")
    root_dir = str(root_dir)
    os.makedirs(os.path.join(root_dir, "data"))
    data_folder_path = os.path.join(root_dir, "data")
    data_path = os.path.join(root_dir, "data", "Titanic.csv")
    fixture_path = file_relative_path(
        __file__, os.path.join("..", "..", "test_sets", "Titanic.csv")
    )
    shutil.copy(fixture_path, data_path)
    # Create a new project from scratch that we will use for the test in the next step
    runner = CliRunner(mix_stderr=False)
    # Bug fix: `catch_exceptions=False` was previously passed as a keyword
    # argument to str.format() (where str.format silently ignores unused
    # keyword arguments) instead of to runner.invoke(), so CLI exceptions
    # were being swallowed rather than failing the test.
    result = runner.invoke(
        cli,
        ["init", "-d", root_dir],
        input="\n\n1\n1\n{}\n\n\n\n2\n{}\n\n\n\n".format(
            data_folder_path, data_path
        ),
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    # Data Docs were built and opened in the (mocked) browser exactly once.
    assert mock_webbrowser.call_count == 1
    assert (
        "{}/great_expectations/uncommitted/data_docs/local_site/validations/Titanic/warning/".format(
            root_dir
        )
        in mock_webbrowser.call_args[0][0]
    )
    # Now the test begins - rerun the init on an existing project
    runner = CliRunner(mix_stderr=False)
    with pytest.warns(
        UserWarning, match="Warning. An existing `great_expectations.yml` was found"
    ):
        result = runner.invoke(
            cli, ["init", "-d", root_dir], input="n\n", catch_exceptions=False
        )
    stdout = result.stdout
    # No second docs build: the browser was opened only by the first init.
    assert mock_webbrowser.call_count == 1
    assert result.exit_code == 0
    assert "This looks like an existing project that" in stdout
    assert "appears complete" in stdout
    assert "ready to roll" in stdout
    assert "Would you like to build & view this project's Data Docs" in stdout
    assert_no_logging_messages_or_tracebacks(caplog, result)
@mock.patch("webbrowser.open", return_value=True, side_effect=None)
def test_cli_init_connection_string_non_working_db_connection_instructs_user_and_leaves_entries_in_config_files_for_debugging(
    mock_webbrowser, caplog, tmp_path_factory, sa
):
    """``init`` with a broken sqlalchemy URL must fail loudly (exit code 1)
    but keep the partially written config entries on disk for debugging."""
    root_dir = tmp_path_factory.mktemp("bad_con_string_test")
    root_dir = str(root_dir)
    with set_directory(root_dir):
        runner = CliRunner(mix_stderr=False)
        result = runner.invoke(
            cli,
            ["init"],
            input="\n\n2\n6\nmy_db\nsqlite:////subfolder_thats_not_real/not_a_real.db\n\nn\n",
            catch_exceptions=False,
        )
        stdout = result.output
        assert mock_webbrowser.call_count == 0
        assert "Always know what to expect from your data" in stdout
        assert "What data would you like Great Expectations to connect to" in stdout
        assert "Which database backend are you using" in stdout
        assert (
            "What is the url/connection string for the sqlalchemy connection" in stdout
        )
        assert "Give your new Datasource a short name" in stdout
        assert (
            "Attempting to connect to your database. This may take a moment" in stdout
        )
        assert "Cannot connect to the database" in stdout
        assert "Profiling" not in stdout
        assert "Building" not in stdout
        assert "Data Docs" not in stdout
        assert "Great Expectations is now set up" not in stdout
        assert result.exit_code == 1
        ge_dir = os.path.join(root_dir, DataContext.GE_DIR)
        assert os.path.isdir(ge_dir)
        config_path = os.path.join(ge_dir, DataContext.GE_YML)
        assert os.path.isfile(config_path)
        # FIX: open the config files via context managers so the handles are
        # closed deterministically instead of leaking until GC.
        with open(config_path) as f:
            config = yaml.load(f)
        assert config["datasources"] == {
            "my_db": {
                "data_asset_type": {
                    "module_name": None,
                    "class_name": "SqlAlchemyDataset",
                },
                "credentials": "${my_db}",
                "class_name": "SqlAlchemyDatasource",
                "module_name": "great_expectations.datasource",
            }
        }
        config_path = os.path.join(
            ge_dir, DataContext.GE_UNCOMMITTED_DIR, "config_variables.yml"
        )
        with open(config_path) as f:
            config = yaml.load(f)
        assert config["my_db"] == {
            "url": "sqlite:////subfolder_thats_not_real/not_a_real.db"
        }
        # Profilers are v014+ specific
        os.rmdir(os.path.join(root_dir, "great_expectations", "profilers"))
        obs_tree = gen_directory_tree_str(os.path.join(root_dir, "great_expectations"))
        assert (
            obs_tree
            == """\
great_expectations/
    .gitignore
    great_expectations.yml
    checkpoints/
    expectations/
        .ge_store_backend_id
    plugins/
        custom_data_docs/
            renderers/
            styles/
                data_docs_custom_styles.css
            views/
    uncommitted/
        config_variables.yml
        data_docs/
        validations/
            .ge_store_backend_id
"""
        )
    assert_no_logging_messages_or_tracebacks(caplog, result)
| |
# CODING=Utf-8
"""Plotting utilities used throughout the viewer.mpl package."""
import os
import atexit
import numpy as np
# TODO expensive import costs 75% of total time
# see: python -X importtime -c 'import pygimli'
# import matplotlib.animation as animation
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pygimli as pg
from pygimli.utils import prettyFloat
holdAxes__ = 0
def updateFig(fig, force=False, sleep=.0001):
    """For internal use: redraw *fig* unless the global hold flag is set."""
    if holdAxes__:
        return
    try:
        fig.canvas.draw_idle()
        if force:
            fig.canvas.flush_events()
        # A short pause lets the GUI event loop process the redraw.
        pg.plt.pause(sleep)
    except BaseException as e:
        print(fig, e)
        pg.warn("Exception raised", e)
def updateAxes(ax, force=False):
    """For internal use: redraw the figure that owns *ax*."""
    fig = ax.figure
    updateFig(fig, force=force)
def hold(val=1):
    """Set the global redraw hold flag.

    While the flag is nonzero, updateFig()/updateAxes() skip redrawing.

    BUG FIX: the previous implementation did ``globals()[holdAxes__] = val``,
    which used the *value* of ``holdAxes__`` (0) as the dictionary key and
    therefore never updated the flag itself.
    """
    global holdAxes__
    holdAxes__ = val
def waitOnExit():
    """Block at interpreter exit until all open Qt/Wx figures are closed."""
    backend = matplotlib.get_backend()
    if 'inline' in backend:
        return
    if ('Qt' in backend or 'Wx' in backend) and len(plt.get_fignums()) > 0:
        pg.info('Showing pending widgets on exit. '
                'Close all figures or Ctrl-C to quit the programm')
        pg.wait()
# Register the exit hook once at import time. The pg.rc['waitOnExit'] value
# is read only here, so it can't be changed after import.
if pg.rc['waitOnExit'] is True:
    atexit.register(waitOnExit)
def wait(**kwargs):
    """Flush pending draws, then block.

    With ``untilKeyPressed=True`` return on the next key/button press,
    otherwise block in ``plt.show``. Remaining kwargs are forwarded.
    """
    # plt.pause seems to be broken in mpl:2.1
    updateAxes(plt.gca())
    untilKey = kwargs.pop('untilKeyPressed', False)
    if untilKey == True:
        plt.waitforbuttonpress(**kwargs)
    else:
        plt.show(**kwargs)
def saveFigure(fig, filename, pdfTrim=False):
    """Save *fig* as ``<filename>.pdf``; optionally run pdf2pdf trim tools."""
    # Cut the name at the first '.pdf' occurrence (if any) to get the base.
    base = filename.partition('.pdf')[0]
    fig.savefig(base + '.pdf', bbox_inches='tight')
    if pdfTrim:
        try:
            print("trying pdf2pdfS ... ")
            os.system('pdf2pdfBB ' + base + '.pdf')
            os.system('pdf2pdfS ' + base + '.pdf')
        except BaseException as _:
            print("fail local convert. Should be no problem.")
def saveAxes(ax, filename, adjust=False):
    """Save axes as pdf.

    When *adjust* is True the common world-axes defaults (labels, depth
    ticks) are applied first. Forces a redraw before saving the owning
    figure via saveFigure().
    """
    if adjust:
        adjustWorldAxes(ax)
    updateAxes(ax, force=True)
    saveFigure(ax.figure, filename)
def insertUnitAtNextLastTick(ax, unit, xlabel=True, position=-2):
    """Replace the last-but-one tick label by unit symbol.

    With xlabel=True the x-axis is modified, otherwise the y-axis.
    """
    if xlabel:
        labels = ax.get_xticklabels()
        labels[position] = unit
        ax.set_xticklabels(labels)
    else:
        labels = ax.get_yticklabels()
        # NOTE(review): the y branch negates ``position`` (default -2 ->
        # index 2) while the x branch uses it directly. Possibly intentional
        # because y ticks run bottom-to-top, but it looks like an oversight
        # -- verify before relying on the y-axis behavior.
        labels[-position] = unit
        ax.set_yticklabels(labels)
def adjustWorldAxes(ax):
    """Apply common defaults for a 2D world view: x/depth labels and
    positive depth tick labels."""
    ax.set_xlabel('$x$ (m)')
    ax.set_ylabel('Depth (m)')
    renameDepthTicks(ax)
    plt.tight_layout()
    updateAxes(ax)
def renameDepthTicks(ax):
    """Switch signs of depth ticks to be positive.

    Depth axes grow downward with negative coordinates; a formatter shows
    them as positive values instead.
    """
    @ticker.FuncFormatter
    def major_formatter(x, pos):
        # BUG FIX: the previous ``prettyFloat(-x) % x`` applied %-formatting
        # to the already formatted label string, which raises TypeError for
        # any label that contains no format specifier.
        return prettyFloat(-x)
    ax.yaxis.set_major_formatter(major_formatter)
    updateAxes(ax)
def setOutputStyle(dim='w', paperMargin=5, xScale=1.0, yScale=1.0, fontsize=9,
                   scale=1, usetex=True):
    """Set preferred output style.

    Sizes figures relative to an A4 page (minus *paperMargin* cm) and sets
    font/tick rcParams scaled by *scale*.

    Parameters
    ----------
    dim : str ['w']
        'w': use the page width as reference dimension, anything else the
        page height.
    paperMargin : float [5]
        Margin in cm subtracted from the reference page dimension.
    xScale, yScale : float [1.0]
        Figure width/height as fraction of the usable text width.
    fontsize : int [9]
        Base font size in points (multiplied by *scale*).
    scale : float [1]
        Global scaling applied to figure size, fonts and ticks.
    usetex : bool [True]
        Render text with LaTeX.
    """
    if dim == 'w':
        dim = 0
    else:
        dim = 1
    a4 = [21.0, 29.7]
    inches_per_cm = 1. / 2.54
    # BUG FIX: the computed ``dim`` index was never used (a4[0] was
    # hard-coded), so dim != 'w' had no effect.
    textwidth = (a4[dim] - paperMargin) * inches_per_cm
    fig_width = textwidth * xScale  # fig width in inches
    fig_height = textwidth * yScale  # fig height in inches
    fig_size = [fig_width * scale, fig_height * scale]
    params = {
        'backend': 'ps',
        # BUG FIX: 'ax.labelsize' is not a valid rcParam key
        # ('axes.labelsize' is) and made rcParams.update() raise KeyError.
        'axes.labelsize': fontsize * scale,
        'font.size': fontsize * scale,
        'legend.fontsize': fontsize * scale,
        'xtick.labelsize': fontsize * scale,
        'ytick.labelsize': fontsize * scale,
        'font.family': 'sans-serif',
        'text.usetex': usetex,
        'figure.figsize': fig_size,
        'xtick.major.pad': 4 * scale,
        'xtick.minor.pad': 4 * scale,
        'ytick.major.pad': 4 * scale,
        'ytick.minor.pad': 4 * scale,
        'xtick.major.size': 4 * scale,  # major tick size in points
        'xtick.minor.size': 2 * scale,  # minor tick size in points
        'ytick.major.size': 4 * scale,  # major tick size in points
        'ytick.minor.size': 2 * scale,  # minor tick size in points
        'lines.markersize': 6 * scale,
        'lines.linewidth': 0.6 * scale
    }
    plt.rcParams.update(params)
def setPlotStuff(fontsize=7, dpi=None):
    """TODO merge with setOutputStyle.
    Change ugly name.
    """
    from matplotlib import rcParams
    params = {
        'font.size': fontsize,
        'xtick.labelsize': fontsize,
        'ytick.labelsize': fontsize,
        'legend.fontsize': fontsize,
        'font.family': 'sans-serif',
        'font.sans-serif': ['Helvetica'],  # ['Times New Roman']
        'text.usetex': False,
        'xtick.major.size': 3,
        'xtick.major.width': 0.3,
        'xtick.minor.size': 1.5,
        'xtick.minor.width': 0.3,
        # y ticks mirror the x tick geometry
        'ytick.major.size': 3,
        'ytick.major.width': 0.3,
        'ytick.minor.size': 1.5,
        'ytick.minor.width': 0.3,
    }
    if dpi is not None:
        params['figure.dpi'] = dpi
        params['savefig.dpi'] = dpi
    rcParams.update(params)
def createAnimation(fig, animate, nFrames, dpi, out):
    """Create animation for the content of a given matplotlib figure.

    Until I know a better place.

    Saves ``out + '.mp4'`` and additionally tries (best effort) to dump
    single jpg frames into ``anim-<out>/`` via ffmpeg.
    """
    # Lazy import: matplotlib.animation is expensive to import (see the
    # import-time note at the top of this module) and its module-level
    # import was commented out, which left ``animation`` undefined here --
    # a NameError at call time.
    import matplotlib.animation as animation
    anim = animation.FuncAnimation(fig, animate, frames=nFrames,
                                   interval=0.001, repeat=False)
    anim.save(out + ".mp4", writer=None, fps=20, dpi=dpi, codec=None,
              bitrate=24 * 1024, extra_args=None, metadata=None,
              extra_anim=None, savefig_kwargs=None)
    try:
        print("Create frames ... ")
        os.system('mkdir -p anim-' + out)
        os.system('ffmpeg -i ' + out + '.mp4 anim-' + out + '/movie%d.jpg')
    except BaseException as _:
        pass
def saveAnimation(mesh, data, out, vData=None, plc=None, label='', cMin=None,
                  cMax=None, logScale=False, cmap=None, **kwargs):
    """Create and save an animation for a given mesh with a set of field data.

    Until I know a better place.

    Draws the first frame, then delegates per-frame updates to an inner
    ``animate`` callback passed to createAnimation(). When *vData* is given,
    each frame is fully redrawn including streamlines; otherwise only the
    mappable's data is swapped, which is much cheaper.

    Parameters: *data* is one array of field values per frame; *out* is the
    output base name; *plc* an optional geometry overlay; remaining kwargs
    go to drawStreams.
    """
    dpi = 92
    scale = 1
    fig = plt.figure(facecolor='white',
                     figsize=(scale * 800 / dpi, scale * 490 / dpi), dpi=dpi)
    ax = fig.add_subplot(1, 1, 1)
    gci = pg.viewer.mpl.drawModel(ax, mesh, data=data[0], cMin=cMin, cMax=cMax,
                                  cMap=cmap, logScale=logScale)
    pg.viewer.mpl.createColorbar(gci, label=label, pad=0.55)
    if plc:
        pg.show(plc, ax=ax)
    adjustWorldAxes(ax)
    def animate(i):
        """Render frame *i* (full redraw when vector data is present)."""
        print(out + ": Frame:", i, "/", len(data))
        if vData is not None:
            ax.clear()
            pg.viewer.mpl.holdAxes_ = 1
            pg.viewer.mpl.drawModel(ax, mesh, data=data[i], cMin=cMin,
                                    cMax=cMax, cMap=cmap, logScale=logScale)
            pg.viewer.mpl.drawStreams(ax, mesh, vData[i], **kwargs)
        else:
            print(min(data[i]), max(data[i]))
            pg.viewer.mpl.setMappableData(gci, data[i], cMin=cMin, cMax=cMax,
                                          logScale=logScale)
        plt.pause(0.001)
    createAnimation(fig, animate, int(len(data)), dpi, out)
def plotLines(ax, line_filename, linewidth=1.0, step=1):
    """Read lines from file and plot over model.

    step=1 draws the whole polyline at once, step=2 draws separate
    two-point segments. Other step values draw nothing.
    """
    coords = np.loadtxt(line_filename)
    if step == 2:
        for start in range(0, coords.shape[0], 2):
            segment = coords[start:start + 2]
            ax.plot(segment[:, 0], segment[:, 1], 'k-', linewidth=linewidth)
    if step == 1:
        ax.plot(coords[:, 0], coords[:, 1], 'k-', linewidth=linewidth)
def twin(ax):
    """Return the first other axes in the same figure that shares the exact
    bounding box of *ax*, or None if there is no such twin."""
    bounds = ax.bbox.bounds
    for candidate in ax.figure.axes:
        if candidate is not ax and candidate.bbox.bounds == bounds:
            return candidate
    return None
def createTwinX(ax):
    """Utility function to create (or return existing) twin x axes for ax."""
    # twinx(): new axes sharing the x-axis, with an independent y-axis.
    return _createTwin(ax, 'twinx')
def createTwinY(ax):
    """Utility function to create (or return existing) twin y axes for ax."""
    # twiny(): new axes sharing the y-axis, with an independent x-axis.
    return _createTwin(ax, 'twiny')
def _createTwin(ax, funct):
"""Utility function to create (or return existing) twin x axes for ax."""
tax = None
for other_ax in ax.figure.axes:
if other_ax is ax:
continue
if other_ax.bbox.bounds == ax.bbox.bounds:
tax = other_ax
if tax is None:
tax = getattr(ax, funct)()
return tax
def isInteractive():
    """Returns False if a non-interactive backend is used, e.g. for Jupyter Notebooks and sphinx builds."""
    name = plt.get_backend().lower()
    nonInteractive = "inline" in name or name == "agg"
    return not nonInteractive
| |
""" Setting adaptive threshold"""
import numpy
import math
import warnings
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.prediction_vector import PredictionVector
class AdaptiveThresholdPreprocessingNode(BaseNode):
    """ Setting adaptive threshold as described by Semmaoui, H., etal. (2012)

    This node can be used to threshold a continuous signal with a adaptive threshold.
    The advantage over a simple fixed threshold method is the adaption to the signal.
    For example if a sensor value drifts over time either in positive or negative
    direction, a fixed threshold method can have big problems with this one. For
    a negative drift the "zero" value may get so low that the fixed threshold is
    never reached again, the other way round a positive drift can lead to a continuous
    overcoming of the fixed threshold. The adaptive threshold is based on the following
    publication:

        Semmaoui, H., etal. (2012).
        Setting adaptive spike detection threshold for smoothed TEO based on robust statistics theory.
        IEEE Transactions on Biomedical Engineering, 59(2):474 - 482.
        (http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=06070974)

    The formula is given as:

    .. math:: T(t) = mean(t)_N + p*std(t)_N,

    where T(t) is the threshold at a given timepoint t, mean(t)_N is the mean at
    timepoint t calculated over the last N samples p is the sensitivity factor and
    std(t)_N is the standard deviation at timepoint t calculated over the last N
    samples.

    The processing is split into two parts, this node implements the first part
    which does the actual thresholding and saves a timeseries containing zeros,
    except at those timepoints where the signal exceeded the threshold. NOTICE
    only the very first timepoint where the signal overcame the threshold is
    marked with the value 1 all other values remain a zero. In a second step
    see "AdaptiveThresholdClassifierNode" below the results are transfered into
    prediction vectors. This is done since the threshold methods needs the whole
    data in order to continuously calculate the mean and std. dev., otherwise the
    first N samples of each window could not be used for analysis. IMPORTANT the
    preprocessing has to be done without any windowing except the NULL marker,
    with a fixed nullmarkerstride.

    **Parameters**

        :width_adaptiveThreshold:
            Specifies the width of the window used for calculating the mean and
            the standard deviation for the threshold in ms

            (*optional, default:2000*)

        :p_adaptiveThreshold:
            Specifies the p for the adaptive threshold

            (*optional, default:8*)

        :time_below_threshold:
            Specifies how long the signal has to be below the signal before a new
            thresholding is allowed in ms. This is helpful if only the beginning
            of some event should be detected in the signal.

            (*optional, default:1000*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : AdaptiveThreshold_Preprocessing
            parameters :
                width_adaptive_threshold : 2000
                p_adaptive_threshold : 8
                time_below_threshold : 1000

    :Author: Marc Tabie (mtabie@informatik.uni-bremen.de)
    :Created: 2013/01/17
    :Last change: 2013/01/23 by Marc Tabie
    """
    input_types=["TimeSeries"]
    def __init__(self, width_adaptive_threshold = 2000, p_adaptive_threshold = 8, time_below_threshold = 1000, **kwargs):
        super(AdaptiveThresholdPreprocessingNode, self).__init__(**kwargs)
        self.set_permanent_attributes(width_AT = width_adaptive_threshold, #Width of the adaptive threshold
                      ringbuffer_AT = None, #Ringbuffer for storing old data for the adaptive Threshold
                      p_AT = p_adaptive_threshold, #p of the adaptive threshold
                      variables_AT = [0,0,0,0], #Values for calculating the adaptive threshold see function adaptive_threshold()
                      below_threshold = None, #Array which indicates how long each signal was below the threshold
                      time_below_threshold = time_below_threshold) #Time in ms where the signal has to below the threshold in order to make a new detection
    def is_trainable(self):
        """ Returns whether this node is trainable. """
        return False
    def is_supervised(self):
        """ Returns whether this node requires supervised training """
        return False
    def _execute(self, x):
        """ Executes the preprocessing on the given data vector x"""
        #Number of retained channels
        num_channels = numpy.size(x,1)
        # BUG FIX: this lazy-init check used ``== None``; after the first
        # call ``below_threshold`` is a numpy array, for which ``== None``
        # is an elementwise comparison whose truth value is ambiguous and
        # raises ValueError on modern numpy. ``is None`` is the correct test.
        if self.below_threshold is None:
            # When the node is called for the first time initialize all parameters/variables
            self.width_AT = int((self.width_AT*x.sampling_frequency)/1000.)
            #Convert the time from ms to samples
            self.time_below_threshold = int((self.time_below_threshold*x.sampling_frequency)/1000.)
            #Create and prefill the array which indicates how long a signal was
            #below the threshold; prefilled so a detection is allowed right away
            self.below_threshold = numpy.zeros(num_channels)
            self.below_threshold.fill(self.time_below_threshold+1)
            #Create the ringbuffer and the variables list for the adaptive threshold
            self.ringbuffer_AT = numpy.zeros((self.width_AT,num_channels))
            self.variables_AT = numpy.zeros((4,num_channels))
        data = x.view(numpy.ndarray)
        #Create the array for the thresholded data
        threshold_data = numpy.zeros(data.shape)
        #For each sample of each retained channel
        for i in range(num_channels):
            data_index = 0
            for sample in data[:,i]:
                #calculate the adaptive threshold
                value = self.adaptive_threshold(sample, i)
                #if the actual sample exceeds the threshold...
                if sample >= value:
                    #...and the refractory time was observed...
                    if self.below_threshold[i] > self.time_below_threshold:
                        #...store a 1 indicating an onset
                        threshold_data[data_index][i] = 1
                        #and reset the refractory counter
                        self.below_threshold[i] = 0
                else:
                    #increase the time the signal was below the threshold
                    self.below_threshold[i] += 1
                data_index += 1
        #return the thresholded data
        result_time_series = TimeSeries.replace_data(x, threshold_data)
        return result_time_series
    def get_output_type(self, input_type, as_string=True):
        """ The node always produces a TimeSeries. """
        return self.string_to_class("TimeSeries")
    def adaptive_threshold(self, data_point, channel_counter):
        """Adaptive threshold for single values

        data_point = new datapoint
        channel_counter = index for the retained channels in the ringbuffer

        Maintains a running sum and a running squared-deviation term over
        the ring buffer, so mean and std are updated in O(1) per sample.
        """
        #current ring-buffer slot (stored as float inside the numpy array)
        i = int(self.variables_AT[1][channel_counter])
        n = self.width_AT
        #update the running "n^2 * variance" term S1
        S1 = float(self.variables_AT[2][channel_counter] + (data_point - self.ringbuffer_AT[i][channel_counter])\
            * ((n-1.0) * data_point + (n+1.0) * self.ringbuffer_AT[i][channel_counter] - (2.0 * self.variables_AT[3][channel_counter])))
        self.variables_AT[2][channel_counter] = S1
        #update the running sum: replace the oldest sample by the new one
        self.variables_AT[3][channel_counter] = self.variables_AT[3][channel_counter]+(data_point-self.ringbuffer_AT[i][channel_counter])
        #threshold T = p * std + mean
        self.variables_AT[0][channel_counter] = self.p_AT*math.sqrt(S1/(n*n)) + (self.variables_AT[3][channel_counter]/n)
        self.ringbuffer_AT[i][channel_counter] = data_point
        #advance the ring-buffer index with wrap-around
        i = i + 1
        if i >= n:
            i = 0
        self.variables_AT[1][channel_counter] = i
        return self.variables_AT[0][channel_counter]
class AdaptiveThresholdClassifierNode(BaseNode):
    """ Adaptive threshold onset detection classifier

    Windows produced by the "AdaptiveThresholdPreprocessingNode" contain a 1
    at every detected onset sample. This node scans each channel of an
    incoming window for such markers; if at least
    ``num_channels_above_threshold`` channels contain one, the window is
    labeled with the positive class, otherwise with the negative class.

    **Parameters**

        :class_labels:
            Specifies the names corresponding to the two classes separated
            by the threshold method. NOTICE first give the negative class
            followed by the positive one

            (*optional, default:['no_movement','movement']*)

        :num_channels_above_threshold:
            Specifies how many channels inside a window have to exceed the
            threshold in order to detect an onset

            (*optional, default:1*)

    **Exemplary Call**

    .. code-block:: yaml

        -
            node : AdaptiveThreshold_Classifier

    :Author: Marc Tabie (mtabie@informatik.uni-bremen.de)
    :Created: 2013/01/17
    :Last change: 2013/01/23 by Marc Tabie
    """
    def __init__(self, class_labels = ['no_movement', 'movement'], num_channels_above_threshold=1, **kwargs):
        super(AdaptiveThresholdClassifierNode, self).__init__(**kwargs)
        self.set_permanent_attributes(labels=class_labels,  # [negative, positive] class names
                                      num_channels_above=num_channels_above_threshold,
                                      test=0)
    def is_trainable(self):
        """ Returns whether this node is trainable. """
        return False
    def is_supervised(self):
        """ Returns whether this node requires supervised training """
        return False
    def _execute(self, x):
        """ Executes the classifier on the given data vector x"""
        num_channels = numpy.size(x, 1)
        data = x.view(numpy.ndarray)
        # Clamp the configured channel count into the valid range
        # [1, num_channels], warning about any correction.
        if self.num_channels_above <= 0:
            warnings.warn("num_channels_above_threshold was set to %d. The value has to be greater then zero, now its set to 1" %(self.num_channels_above))
            self.num_channels_above = 1
        elif self.num_channels_above > num_channels:
            warnings.warn("num_channels_above_threshold was set to %d. But only %d channels are retained, now its set to %d" %(self.num_channels_above,num_channels,num_channels))
            self.num_channels_above = num_channels
        # One boolean per channel: does it contain any onset marker?
        onset_flags = numpy.any(data, axis=0)
        # Positive label if enough channels fired, negative otherwise.
        label = (self.labels[1]
                 if numpy.count_nonzero(onset_flags) >= self.num_channels_above
                 else self.labels[0])
        return PredictionVector(label=label,
                                prediction=self.labels.index(label),
                                predictor=self)
# Maps node names usable in YAML node-chain specifications to classes.
_NODE_MAPPING = {"AdaptiveThreshold_Preprocessing": AdaptiveThresholdPreprocessingNode,
                 "AdaptiveThreshold_Classifier": AdaptiveThresholdClassifierNode}
| |
import os
import sys
from urllib import quote
import xml.etree.ElementTree as ET
import shutil
import glob
from django.conf import settings
from django.test import TestCase
from django.test import LiveServerTestCase
from django.core.urlresolvers import reverse
from django.db.models.base import ValidationError
from django.contrib.auth.models import User
from django_webtest import WebTest
from collection_record.forms import CollectionRecordForm
from collection_record.models import CollectionRecord
from collection_record.models import SupplementalFile
from collection_record.perm_backend import CollectionRecordPermissionBackend
from collection_record.perm_backend import get_publishing_institutions_for_user
debug_print = lambda x: sys.stdout.write(x+'\n\n') if os.environ.get('DEBUG', False) else lambda x: x
class CollectionRecordTestDirSetupMixin(object):
    '''Mixin to add override of output directory for EAD files'''
    # All EAD output generated during tests goes in ./data next to this file.
    dir_root = os.path.join(os.path.abspath(os.path.split(__file__)[0]), 'data')
    def setUp(self):
        '''Override the "databases" config file to use the test shoulder'''
        ## os.environ['DATABASES_XML_FILE'] = os.path.join(os.environ['HOME'], '.databases-test.xml')
        # Point the EAD writer at the throwaway test directory and make sure
        # it exists before any test body runs.
        os.environ['EAD_ROOT_DIR'] = CollectionRecordTestDirSetupMixin.dir_root
        if not os.path.isdir(CollectionRecordTestDirSetupMixin.dir_root):
            os.makedirs(CollectionRecordTestDirSetupMixin.dir_root)
        debug_print( "TEST DIR ROOT==========>" + CollectionRecordTestDirSetupMixin.dir_root)
        super(CollectionRecordTestDirSetupMixin, self).setUp()
    def tearDown(self):
        # Remove the whole test output tree so each test starts clean.
        debug_print("DELETING TEST DIR------------>" + CollectionRecordTestDirSetupMixin.dir_root)
        if os.path.isdir(CollectionRecordTestDirSetupMixin.dir_root):
            shutil.rmtree(CollectionRecordTestDirSetupMixin.dir_root)
        super(CollectionRecordTestDirSetupMixin, self).tearDown()
class CollectionRecordModelTest(CollectionRecordTestDirSetupMixin, TestCase):
    '''Test the CollectionRecord django model'''
    fixtures = ['collection_record.collectionrecord.json', 'collection_record.dublincore.json', 'collection_record.supplementalfile.json', 'collection_record.publishinginstitution.json', 'collection_record.auth.user.json']
    def testModelExists(self):
        # An empty record must fail model validation (required fields unset).
        rec = CollectionRecord()
        self.failUnlessRaises(ValidationError, rec.full_clean)
    def testEZID_DublinCoreUpdate(self):
        '''Test that the Dublin Core attrs of the EZID get updated by
        a save of the object.
        '''
        # NOTE(review): intentionally empty placeholder -- no assertions yet.
        pass
    def testEAD_xml_output(self):
        '''Test the ead string output for a CollectionRecord. Check unicode
        support
        '''
        rec = CollectionRecord.objects.get(pk="1")
        ead_xml = rec.ead_xml
        # Structural spot-checks on the generated EAD XML string.
        self.failUnless(ead_xml.index('<?xml') == 0)
        self.failUnless('<ead>' in ead_xml)
        self.failUnless('1' in ead_xml)
        self.failUnless('persname' in ead_xml)
        self.failUnless('<physdesc label="Extent">' in ead_xml)
        self.failUnless('<repository label="' in ead_xml)
        self.failUnless('<abstract label="Abstract">' in ead_xml)
        self.failUnless('<langmaterial><language langcode="' in ead_xml)
        self.failUnless('<accessrestrict id="accessrestrict"><head>Access</head><p>' in ead_xml)
        self.failUnless('<userestrict id="userestrict"><head>Publication Rights</head><p>' in ead_xml)
        self.failUnless('<prefercite id="prefercite"><head>Preferred Citation</head>' in ead_xml)
        self.failUnless('<acqinfo id="acqinfo"><head>Acquisition Information</head>' in ead_xml)
        self.failUnless('<bioghist id="bioghist"><head>Biography/Administrative History</head>' in ead_xml)
        self.failUnless('<scopecontent id="scopecontent"><head>Scope and Content of Collection</head>' in ead_xml)
        self.failUnless('<controlaccess id="controlaccess">' in ead_xml)
        self.failUnless('id="archdesc' in ead_xml)
        self.failUnless('</archdesc>' in ead_xml)
        self.failUnless('</ead>' in ead_xml)
        self.failUnless('repositorycode="'+rec.publisher.mainagency+'" countrycode="US">'+rec.local_identifier+'</unitid>' in ead_xml)
        self.failIf('<!DOCTYPE' in ead_xml)
        self.failUnless('UC' in ead_xml)
        # The output must also be well-formed XML (parse with ElementTree).
        try:
            etree = ET.XML(ead_xml.encode('utf-8'))
        except:
            import sys
            print sys.exc_info()
            self.fail('ElementTree could not parse xml')
        archdesc = etree.find('archdesc')
        did = archdesc.find('did')
        corpname = did.find('repository/corpname')
        self.failUnless('UC' in corpname.text)
        prefercite_p = archdesc.find('prefercite/p')
        self.failUnless('UC' in prefercite_p.text)
        unitdate = did.find('unitdate')
        self.failIf(unitdate.text is None)
    def testEAD_iso_date(self):
        '''Check that the unitdate "normal" attribute only shows up for
        records with date_iso
        '''
        # Record 2 has no ISO date -> no "normal" attribute expected.
        rec = CollectionRecord.objects.get(pk="2")
        ead_xml = rec.ead_xml
        try:
            etree = ET.XML(ead_xml.encode('utf-8'))
        except:
            import sys
            print sys.exc_info()
            self.fail('ElementTree could not parse xml')
        archdesc = etree.find('archdesc')
        did = archdesc.find('did')
        unitdate = did.find('unitdate')
        self.failIf(unitdate.text is None)
        self.failIf('normal' in unitdate.attrib)
        # Record 1 has an ISO date -> "normal" attribute must be present.
        rec = CollectionRecord.objects.get(pk="1")
        ead_xml = rec.ead_xml
        try:
            etree = ET.XML(ead_xml.encode('utf-8'))
        except:
            import sys
            print sys.exc_info()
            self.fail('ElementTree could not parse xml')
        archdesc = etree.find('archdesc')
        did = archdesc.find('did')
        unitdate = did.find('unitdate')
        self.failIf(unitdate.text is None)
        self.failUnless('normal' in unitdate.attrib)
    def testEAD_xml_with_files_output(self):
        # Record 4 has supplemental files -> otherfindaid/extref entries.
        rec = CollectionRecord.objects.get(pk="4")
        ead_xml = rec.ead_xml
        try:
            ET.fromstring(ead_xml)
        except:
            self.fail('ElementTree could not parse xml')
        self.failUnless('</archdesc>' in ead_xml)
        self.failUnless('<otherfindaid' in ead_xml)
        self.failUnless('</extref>' in ead_xml)
        self.failUnless('test-2.pdf' in ead_xml)
    def testEAD_file_save(self):
        # Saving must materialize the EAD file under the (test) dir_root.
        rec = CollectionRecord.objects.get(pk="1")
        dir_root = os.path.join(os.path.abspath(os.path.split(__file__)[0]), 'data')
        rec.dir_root = dir_root
        if not os.path.isdir(rec.ead_dir):
            os.makedirs(rec.ead_dir)
        rec.save_ead_file()
        if not os.path.exists(rec.ead_filename):
            self.fail('Did not create EAD file %s' %(rec.ead_filename,))
    def testXMLURL(self):
        '''test that the xml url function exists & returns something.
        '''
        rec = CollectionRecord.objects.get(pk="4")
        # NOTE(review): get_xml_url is referenced but not *called* here, so
        # this only checks that the attribute exists -- probably meant
        # rec.get_xml_url(); verify intent.
        url = rec.get_xml_url
        self.failUnless(url is not None)
    def testEAD_file_remove_on_delete(self):
        '''Test that the EAD xml file is removed when the Collection Record is
        deleted
        '''
        rec = CollectionRecord.objects.get(pk="1")
        dir_root = os.path.join(os.path.abspath(os.path.split(__file__)[0]), 'data')
        rec.dir_root = dir_root
        if not os.path.isdir(rec.ead_dir):
            os.makedirs(rec.ead_dir)
        rec.save_ead_file()
        ead_fname = rec.ead_filename
        rec.delete()
        if os.path.exists(ead_fname):
            self.fail('Did not delete ead file %s' % (ead_fname,))
class CollectionRecordFormTestCase(CollectionRecordTestDirSetupMixin, TestCase):
    '''Test the form for creating new collection records. Is this form different
    from the existing record form?
    '''
    def testNewForm(self):
        # Smoke test: instantiating an unbound form must not raise.
        # NOTE(review): no assertions here; consider asserting on f.fields.
        f = CollectionRecordForm()
class CollectionRecordViewAllTestCase(CollectionRecordTestDirSetupMixin, TestCase):
    '''Test the view of all collection records for a user
    '''
    fixtures = ['collection_record.collectionrecord.json', 'collection_record.dublincore.json', 'collection_record.publishinginstitution.json', 'collection_record.auth.user.json']
    ##### def setUp(self):
    #####     super(CollectionRecordViewAllTestCase, self).setUp()
    #####
    def testViewAllCollectionRecords(self):
        '''Verify that the user can see their institution's collection records
        and not others.
        '''
        url = reverse('collection_record_view_all', args=None)
        # Regular user: listing renders and contains record links.
        ret = self.client.login(username='testuser',password='testuser')
        response = self.client.get(url)
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'Collection')
        self.assertContains(response, '/collection-record/')
        # Admin user: login must succeed and the listing must render too.
        ret = self.client.login(username='admin', password='admin')
        self.failUnless(ret)
        response = self.client.get(url)
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'Collection')
        self.assertContains(response, '5')
        self.assertContains(response, '/collection-record/')
    def testLinksOnCollectionRecordListPage(self):
        '''Check that some links do exist on the collection record list page
        '''
        url = reverse('collection_record_view_all', args=None)
        ret = self.client.login(username='testuser',password='testuser')
        response = self.client.get(url)
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, '/collection-record/')
        # The "add" link and per-record detail/xml links must be present.
        url_add = reverse('collection_record_add', args=None)
        self.assertContains(response, url_add)
        rec = CollectionRecord.objects.get(pk='2')
        url_rec = rec.get_absolute_url()
        self.assertContains(response, url_rec)
        url_xml = rec.get_xml_url()
        self.assertContains(response, url_xml)
#TODO:this is going to require a live test server for xtf to talk to
###class CollectionRecordXMLViewTestCase(CollectionRecordTestDirSetupMixin, WebTest):
### '''Test views of the CollectionRecord'''
### fixtures = ['collection_record.collectionrecord.json', 'collection_record.dublincore.json', 'collection_record.publishinginstitution.json', 'collection_record.auth.user.json']
### def setUp(self):
### super(CollectionRecordXMLViewTestCase, self).setUp()
###
###
### def testXMLView(self):
### rec = CollectionRecord.objects.get(pk="1")
### url = rec.get_absolute_url() + '/xml/'
### ret = self.client.login(username='testuser',password='testuser')
### response = self.client.get(url)
### self.failUnlessEqual(200, response.status_code)
### self.assertContains(response, '<ead>')
### self.assertContains(response, 'Banc')
class CollectionRecordEditTestCase(CollectionRecordTestDirSetupMixin, WebTest, LiveServerTestCase):
    '''Test the edit page for the collection records. Should be able to modify
    all data (main & assoc. DCs) and delete and add DC stored data
    '''
    fixtures = ['collection_record.collectionrecord.json', 'collection_record.dublincore.json', 'collection_record.publishinginstitution.json', 'collection_record.auth.user.json']
    # fixtures = ['collectionrecord.json', 'dublincore.json', 'publishinginstitution.json', 'auth.user.json']
    # WebTest submits forms directly, so skip Django's CSRF checks.
    csrf_checks = False

    def setUp(self):
        # Make sure the EAD output directory for the fixture record exists
        # before any test tries to write to it.
        super(CollectionRecordEditTestCase, self).setUp()
        rec = CollectionRecord.objects.get(pk="1")
        if not os.path.isdir(rec.ead_dir):
            os.makedirs(rec.ead_dir)

    def testEditPageAuth(self):
        '''The edit page redirects anonymous users to login and renders the
        edit form for an authenticated user.
        '''
        rec = CollectionRecord.objects.get(pk="1")
        url = rec.get_edit_url()
        response = self.app.get(url)
        self.failUnlessEqual('302 FOUND', response.status)
        self.failUnlessEqual(302, response.status_code)
        # NOTE(review): assertTrue only checks truthiness of its first
        # argument; an equality assertion on the Location header was
        # probably intended -- confirm.
        self.assertTrue(settings.LOGIN_URL+'?next='+quote(url), response.headers['location'])
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'itle')
        self.assertContains(response, '<option value="eng" selected="selected">English</option>')
        self.assertContains(response, 'access')
        self.assertContains(response, 'logout')

    def testEditAttr(self):
        '''Edit a directly associated value of the Record'''
        rec = CollectionRecord.objects.get(pk="4")
        if not os.path.isdir(rec.ead_dir):
            os.makedirs(rec.ead_dir)
        url = rec.get_edit_url()
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'logout')
        form = response.forms['main_form']
        #fill out basic info only,required fields only
        form['title'] = 'Test Title'
        form['title_filing'] = 'Test Filing Title'
        form['date_dacs'] = 'circa 1980'
        form['date_iso'] = '1980'
        form['local_identifier'] = 'LOCALID-test'
        form['extent'] = 'loads of boxes'
        form['abstract'] = 'a nice test collection'
        form['accessrestrict'] = 'public domain'
        form['userestrict'] = 'go craxy'
        form['acqinfo'] = 'by mark'
        form['scopecontent'] = 'test content'
        response = form.submit(user='testuser')
        self.failUnlessEqual(302, response.status_code)
        response.follow()
        self.assertTemplateUsed(response,'collection_record/collection_record/ead_template.xml')
        # Reload the edit page and verify a blank required field is rejected.
        response = self.app.get(url, user='testuser')
        self.assertContains(response, 'logout')
        form = response.forms['main_form']
        form['title'] = ''
        response = form.submit(user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertTemplateUsed(response,'collection_record/collection_record/edit.html')
        self.assertContains(response, 'errorlist')

    def testEditDCTerm(self):
        '''Test the editing of a term stored in an associated DC object
        '''
        u = User.objects.get(username="testuser")
        rec = CollectionRecord.objects.get(pk="1")
        url = rec.get_edit_url()
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'logout')
        form = response.forms['main_form']
        newPerson = 'Mark Redar Test'
        form['person-0-content'] = newPerson
        response = form.submit(user='testuser')
        self.failUnlessEqual(302, response.status_code)
        #self.assertRedirects(response, rec.get_absolute_url())
        response.follow(user='testuser')
        self.assertTemplateUsed(response,'collection_record/collection_record/ead_template.xml')
        #NOTE: Currently can't test the updated "view" of the object because
        # of the xtf interaction, it goes to live back server
        response = self.app.get(url, user='testuser')
        self.assertTrue(newPerson in response)
        self.assertContains(response, newPerson)
        self.assertContains(response, 'logout')
        # NOTE(review): the form submitted below is the stale object fetched
        # before the edit, not one parsed from the page just requested --
        # confirm this is intended.
        response = self.app.get(url, user='testuser')
        form['person-0-content'] = ''
        response = form.submit(user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'errorlist')

    def testDeletionOfDCTerm(self):
        '''Test the deletion of a term'''
        pass
class NewCollectionRecordViewTestCase(CollectionRecordTestDirSetupMixin, WebTest):
    '''Exercise the "add new collection record" view: authentication,
    required-field validation, duplicate local identifiers, associated DC
    terms, explicit ARKs and max-length enforcement.
    '''
    fixtures = ['collection_record.publishinginstitution.json', 'collection_record.auth.user.json']

    def setUp(self):
        # Ensure an output directory exists for every institution the test
        # user can publish to.
        testuser = User.objects.get(username='testuser')
        for i in get_publishing_institutions_for_user(testuser):
            inst_dir = os.path.join(CollectionRecordTestDirSetupMixin.dir_root, i.cdlpath)
            if not os.path.exists(inst_dir):
                os.makedirs(inst_dir)
        super(NewCollectionRecordViewTestCase, self).setUp()

    def parseARK(self, url_string):
        '''Parse the ark from the string'''
        ark_from_url = url_string[url_string.index('ark'):]
        ark_from_url = ark_from_url.rstrip('/')
        return ark_from_url

    def parsePK(self, url_string):
        # The primary key is the last path component of the record URL.
        pk_from_url = url_string.rstrip('/').rsplit('/',1)[1]
        return pk_from_url

    def fill_form_values(self, form):
        '''Helper function to fill in form values for valid submission
        form is an Webtest response.form object
        '''
        #fill out basic info only,required fields only
        form['title'] = 'Test Title'
        form['title_filing'] = 'Test Filing Title'
        form['date_dacs'] = 'circa 1980'
        form['date_iso'] = '1980'
        form['local_identifier'] = 'LOCALID-test'
        form['extent'] = 'loads of boxes'
        form['abstract'] = 'a nice test collection'
        form['accessrestrict'] = 'public domain'
        #form['userestrict'] = 'go craxy'
        #form['acqinfo'] = 'by mark'
        form['scopecontent'] = 'test content'

    def createNewMinimalCR(self):
        '''A helper function to create a new Collection Record with
        a known set of data
        '''
        url = reverse('collection_record_add')
        response = self.app.get(url, user='testuser')
        form = response.form
        #fill out basic info only,required fields only
        self.fill_form_values(form)
        response = form.submit(user='testuser')
        self.failUnlessEqual(302, response.status_code)
        response = response.follow()
        self.failUnlessEqual(200, response.status_code)
        #can't test without a live server, xtf needs to talk to
        pk_from_url = self.parsePK(response.request.url)
        cr=CollectionRecord.objects.get(pk=pk_from_url)
        response = self.app.get(cr.get_edit_url(), user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'Test Title')

    def testNewView(self):
        '''Test the view for creating new collection records.
        View needs to be login protected.
        '''
        url = reverse('collection_record_add')
        response = self.app.get(url)
        self.failUnlessEqual('302 FOUND', response.status)
        self.failUnlessEqual(302, response.status_code)
        # NOTE(review): assertTrue only checks truthiness of its first
        # argument; an equality assertion was probably intended -- confirm.
        self.assertTrue(settings.LOGIN_URL+'?next='+quote(url), response.headers['location'])
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'itle')
        self.assertContains(response, '<option value="eng" selected="selected">English</option>')
        self.assertContains(response, 'access')
        self.assertContains(response, 'person')
        self.assertContains(response, 'family')
        # Submitting the empty form must re-render the add page (with errors).
        form = response.form
        response = form.submit(user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertTemplateUsed(response,'collection_record/collection_record/add.html')
        self.createNewMinimalCR()

    def testDuplicateLocalID(self):
        '''Test that duplicate local IDs can be entered. Some insts use a
        boilerplate identical string for all their collections.
        '''
        url = reverse('collection_record_add')
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        form = response.form
        #fill out basic info only,required fields only
        form['title'] = 'Test Title'
        form['title_filing'] = 'Test Filing Title'
        form['date_dacs'] = 'circa 1980'
        form['date_iso'] = '1980'
        form['local_identifier'] = 'LOCALID-test'
        form['extent'] = 'loads of boxes'
        form['abstract'] = 'a nice test collection'
        form['accessrestrict'] = 'public domain'
        form['userestrict'] = 'go craxy'
        form['acqinfo'] = 'by mark'
        form['scopecontent'] = 'test content'
        response = form.submit(user='testuser')
        self.failUnlessEqual(302, response.status_code)
        response = response.follow()
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'LOCALID')
        # A second record with the same local_identifier but a different
        # filing title must also save successfully.
        url = reverse('collection_record_add')
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        form = response.form
        #fill out basic info only,required fields only
        form['title'] = 'Test Title'
        form['title_filing'] = 'NO DUP Test Filing Title'
        form['date_dacs'] = 'circa 1980'
        form['date_iso'] = '1980'
        form['local_identifier'] = 'LOCALID-test'
        form['extent'] = 'loads of boxes'
        form['abstract'] = 'a nice test collection'
        form['accessrestrict'] = 'public domain'
        form['userestrict'] = 'go craxy'
        form['acqinfo'] = 'by mark'
        form['scopecontent'] = 'test content'
        response = form.submit(user='testuser')
        self.failUnlessEqual(302, response.status_code)
        response = response.follow()
        self.failUnlessEqual(200, response.status_code)

    def testNewWithDCView(self):
        # Create a record that also carries person/family DC terms, then
        # verify those terms appear on the edit page afterwards.
        url = reverse('collection_record_add')
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertTemplateUsed(response,'collection_record/collection_record/add.html')
        form = response.form
        #fill out basic info only,required fields only
        form['title'] = 'Test 2 Title'
        form['title_filing'] = 'Test Filing Title'
        form['date_dacs'] = 'circa 1980'
        form['date_iso'] = '1980'
        form['local_identifier'] = 'LOCALID-test'
        form['extent'] = 'loads of boxes'
        form['abstract'] = 'a nice test collection'
        form['accessrestrict'] = 'public domain'
        form['userestrict'] = 'go craxy'
        form['acqinfo'] = 'by mark'
        form['scopecontent'] = 'test content'
        form['person-0-content'] = 'mark redar'
        form['family-0-content'] = 'redar'
        response = form.submit(user='testuser')
        self.failUnlessEqual(302, response.status_code)
        response = response.follow()
        self.failUnlessEqual(200, response.status_code)
        #goto edit page to confirm, need live server to test view
        pk_from_url = self.parsePK(response.request.url)
        #ark_from_url = self.parseARK(response.request.url)
        cr=CollectionRecord.objects.get(pk=pk_from_url)
        response = self.app.get(cr.get_edit_url(), user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'Test 2 Title')
        self.assertContains(response, 'redar')
        self.assertTemplateUsed(response,'collection_record/collection_record/edit.html')

    def testNewWithARK(self):
        '''Test the collection editor basic function when you've got an ARK already
        '''
        url = reverse('collection_record_add')
        response = self.app.get(url, user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'itle')
        self.assertContains(response, '<option value="eng" selected="selected">English</option>')
        self.assertContains(response, 'access')
        self.assertContains(response, 'person')
        self.assertContains(response, 'family')
        form = response.form
        response = form.submit(user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertTemplateUsed(response,'collection_record/collection_record/add.html')
        form = response.form
        #fill out basic info only,required fields only
        form['ark'] = 'hh' #bad ark should fail
        form['title'] = 'Test Title'
        form['title_filing'] = 'Test Filing Title'
        form['date_dacs'] = 'circa 1980'
        form['date_iso'] = '1980'
        form['local_identifier'] = 'LOCALID-test'
        form['extent'] = 'loads of boxes'
        form['abstract'] = 'a nice test collection'
        form['accessrestrict'] = 'public domain'
        form['userestrict'] = 'go craxy'
        form['acqinfo'] = 'by mark'
        form['scopecontent'] = 'test content'
        response = form.submit(user='testuser')
        # An invalid ARK keeps the user on the add page.
        self.assertTemplateUsed(response,'collection_record/collection_record/add.html')
        form=response.form
        # A well-formed ARK is accepted and stored on the record.
        testark = 'ark:/99999/fk45b0b4n'
        form['ark'] = testark
        response = form.submit(user='testuser')
        self.failUnlessEqual(302, response.status_code)
        response = response.follow()
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'LOCALID')
        cr=CollectionRecord.objects.get(ark=testark)
        response = self.app.get(cr.get_edit_url(), user='testuser')
        self.failUnlessEqual(200, response.status_code)
        self.assertContains(response, 'Test Title')
        self.assertTemplateUsed(response,'collection_record/collection_record/edit.html')

    def testLongInput(self):
        '''Test that form invalid on long inputs (title, extent, all char fields)
        '''
        # NOTE(review): the helpers below log in as 'oactestuser' while every
        # other test uses 'testuser' -- confirm which account the fixtures
        # actually provide.
        def check_resp_error_field(self, form, fieldname):
            # Over-long value: expect the add page back with a max_length error.
            response = form.submit(user='oactestuser')
            self.failUnlessEqual(200, response.status_code)
            self.assertContains(response, 'errors below')
            self.assertContains(response, CollectionRecord._meta.get_field_by_name(fieldname)[0].max_length)
            return response.form
        def check_resp_success(self, form):
            # Value at the limit: expect a redirect to the new record.
            response = form.submit(user='oactestuser')
            self.failUnlessEqual(302, response.status_code)
            response = response.follow()
            self.failUnlessEqual(200, response.status_code)
        def get_form_and_fill(self, url):
            # Fetch a fresh add form pre-filled with valid values.
            response = self.app.get(url, user='oactestuser')
            form = response.form
            self.fill_form_values(form)
            return form
        url_add = reverse('collection_record_add')
        form = get_form_and_fill(self, url_add)
        form['title'] = 'x' * 513
        form = check_resp_error_field(self, form, 'title')
        self.fill_form_values(form)
        form['title'] = 'x' * 512
        check_resp_success(self, form)
        form = get_form_and_fill(self, url_add)
        form['title_filing'] = 'x' * 256
        form = check_resp_error_field(self, form, 'title_filing')
        form['title_filing'] = 'x' * 255
        check_resp_success(self, form)
        # title_filing is varied below to avoid duplicate filing titles.
        form = get_form_and_fill(self, url_add)
        form['title_filing'] = '0'
        form['extent'] = 'x' * 1001
        form = check_resp_error_field(self, form, 'extent')
        form['extent'] = 'x' * 1000
        check_resp_success(self, form)
        form = get_form_and_fill(self, url_add)
        form['title_filing'] = '1'
        form['date_dacs'] = 'x' * 129
        form = check_resp_error_field(self, form, 'date_dacs')
        form['date_dacs'] = 'x' * 128
        check_resp_success(self, form)
        form = get_form_and_fill(self, url_add)
        form['title_filing'] = '2'
        form['date_iso'] = 'x' * 129
        form = check_resp_error_field(self, form, 'date_iso')
        form['date_iso'] = 'x' * 128
        check_resp_success(self, form)
        form = get_form_and_fill(self, url_add)
        form['title_filing'] = '3'
        form['local_identifier'] = 'x' * 256
        form = check_resp_error_field(self, form, 'local_identifier')
        form['local_identifier'] = 'x' * 255
        check_resp_success(self, form)
from collection_record.is_oac import is_OAC
# The live-server tests below exercise the xtf-annotated record view and are
# only meaningful on an OAC install, so the class is defined conditionally.
if is_OAC():
    class CollectionRecordOACViewTestCase(CollectionRecordTestDirSetupMixin, LiveServerTestCase):
        '''Test the annotated view from the xtf. We add a couple of elements (edit button)
        There needs to be a working DSC OAC xtf running on the host specified in
        the env var FINDAID_HOSTNAME
        '''
        fixtures = ['collection_record.collectionrecord.json', 'collection_record.dublincore.json', 'collection_record.publishinginstitution.json', 'collection_record.auth.user.json']

        def setUp(self):
            # Start a test server and tell selenium where to find it.
            live_server = self.live_server_url.replace('http://', '')
            os.environ['BACK_SERVER'] = live_server
            #self.start_test_server('localhost', 8080)
            super(CollectionRecordOACViewTestCase, self).setUp()

        def tearDown(self):
            #self.stop_test_server()
            super(CollectionRecordOACViewTestCase, self).tearDown()

        def testOACView(self):
            # Owner view: the record page should include the Edit link.
            rec = CollectionRecord.objects.get(pk="1")
            url = rec.get_absolute_url()
            url = self.live_server_url+url
            response = self.client.get(url)
            self.failUnlessEqual(302, response.status_code)
            ret = self.client.login(username='testuser',password='testuser')
            self.failUnless(ret)
            response = self.client.get(url)
            self.failUnlessEqual(200, response.status_code)
            #Need a live serverfor this to work....
            self.assertContains(response, 'First Test Title')
            self.assertContains(response, 'localid')
            self.assertContains(response, 'Bancroft')
            self.assertContains(response, rec.get_edit_url())
            self.assertContains(response, 'logout')

        def testOACViewNotOwner(self):
            '''Check that the "Edit" button link doesn't appear in the preview
            for people who can't edit the findaid
            '''
            # NOTE(review): this logs in as the same 'testuser' as testOACView
            # yet expects the edit link to be absent -- confirm the intended
            # non-owner account.
            rec = CollectionRecord.objects.get(pk="1")
            url = rec.get_absolute_url()
            url = self.live_server_url+url
            response = self.client.get(url)
            self.failUnlessEqual(302, response.status_code)
            ret = self.client.login(username='testuser',password='testuser')
            self.failUnless(ret)
            response = self.client.get(url)
            self.failUnlessEqual(200, response.status_code)
            #Need a live serverfor this to work....
            self.assertContains(response, 'First Test Title')
            self.assertContains(response, 'localid')
            self.assertContains(response, 'Bancroft')
            self.assertNotContains(response, rec.get_edit_url())
            self.assertContains(response, 'logout')
class CollectionRecordPermissionsBackendTestCase(CollectionRecordTestDirSetupMixin, TestCase):
    '''test the permission backend for the Collection record app
    '''
    fixtures = ['collection_record.collectionrecord.json', 'collection_record.dublincore.json', 'collection_record.publishinginstitution.json', 'collection_record.auth.user.json']

    def setUp(self):
        self.backend = CollectionRecordPermissionBackend()
        super(CollectionRecordPermissionsBackendTestCase, self).setUp()

    def testUserNotAuthenticated(self):
        '''Test when the user object has not been authenticated
        '''
        # NOTE(review): the return value of has_perm() is not asserted in
        # either test below -- these only verify the call does not raise.
        u = User.objects.get(pk=1)
        self.backend.has_perm(u, 'collection_record.change_collectionrecord')

    def testNoObject(self):
        # Permission check without passing an object instance.
        u = User.objects.get(pk=1)
        self.backend.has_perm(u, 'collection_record.change_collectionrecord')
class SupplementalFileTestCase(CollectionRecordTestDirSetupMixin, TestCase):
    '''Test the supplemental files'''
    fixtures = ['collection_record.collectionrecord.json', 'collection_record.dublincore.json', 'collection_record.supplementalfile.json', 'collection_record.publishinginstitution.json', 'collection_record.auth.user.json']

    def setUp(self):
        # Create the record's supplemental-file directory and seed it with
        # the sample PDFs from the app's fixtures directory.
        super(SupplementalFileTestCase, self).setUp()
        cr = CollectionRecord.objects.get(ark='ark:/99999/fk46h4rq4')
        debug_print( "SUPP DIR" + cr.dir_supplemental_files)
        if not os.path.isdir(cr.dir_supplemental_files):
            os.makedirs(cr.dir_supplemental_files)
        fixtures_dir = os.path.abspath(os.path.join(os.path.split(__file__)[0], '../', 'fixtures'))
        pdf_test_files = glob.glob(os.path.join(fixtures_dir, '*.pdf'))
        debug_print("PDF TEST FILES:::" + str(pdf_test_files))
        for f in pdf_test_files:
            shutil.copy(f, cr.dir_supplemental_files)

    def testURL(self):
        '''Check that the url is correct for a file'''
        # NOTE(review): no assertion here -- this only checks the lookup
        # does not raise.
        sf = SupplementalFile.objects.get(pk=53)

    def testTextFilePath(self):
        '''Check that the name of the txt file is correct'''
        sf = SupplementalFile.objects.get(pk=53)
        self.assertTrue(sf.txt_file_path[-3:] == 'txt')

    def testFileHandle(self):
        # A readable binary handle should be obtainable for the file.
        sf = SupplementalFile.objects.get(pk=53)
        sf.get_filehandle( mode='rb')

    def testRipToText(self):
        # Text extraction is only exercised on OAC installs.
        sf = SupplementalFile.objects.get(pk=53)
        from collection_record.is_oac import is_OAC
        OAC = is_OAC()
        if OAC:
            sf.rip_to_text()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import glob
import time
import uuid
import pickle
import config as dconfig
import logging
import requests
import grequests
import marathon
import argparse
import functools
import jenkinsapi
import multiprocessing
# All driver activity is logged to a local file for post-mortem debugging.
logging.basicConfig(filename='driver.log',level=logging.DEBUG)
# Constants Start
# Most of these are defaults; _init_config() overwrites them from the
# parsed driver configuration.
MARATHON_URL = ''                       # Marathon REST endpoint
APP_PREFIX = 'test_jenkins_master_'     # prefix for generated Marathon app names
JOB_PREFIX = 'job_'
JENKINS_JOB_NAME='build'                # base name for created Jenkins jobs
JENKINS_JOB_CONFIG = 'echojob'          # fixture file holding the job config XML
FIXTURES_DIR = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'fixtures'))+"/"
APP_FIXTURES = "apps/"                  # subdir with Marathon app JSON templates
JOB_FIXTURES = "jobs/"                  # subdir with Jenkins job XML configs
JENKINS_FIXTURE= "jenkins"              # app template used for each Jenkins master
WORKERS = multiprocessing.cpu_count()   # pool size for parallel operations
NUM_JENKINS_JOBS = 2                    # jobs created per master
NUM_JOBS_TO_TRIGGER = 2                 # build rounds fired per job
# Constants End
def _init_config(driverConfig):
global MARATHON_URL
global APP_PREFIX
global JOB_PREFIX
global JENKINS_JOB_NAME
global JENKINS_JOB_CONFIG
global NUM_JENKINS_JOBS
global NUM_JOBS_TO_TRIGGER
MARATHON_URL = driverConfig['marathon_url']
APP_PREFIX = driverConfig['app_prefix']
JOB_PREFIX = driverConfig['job_prefix']
JENKINS_JOB_NAME = driverConfig['jenkins_job_name']
JENKINS_JOB_CONFIG = driverConfig['jenkins_job_config']
NUM_JENKINS_JOBS = int(driverConfig['num_jenkins_jobs'])
NUM_JOBS_TO_TRIGGER = int(driverConfig['num_jobs_to_trigger'])
def check_jenkins(jenkins_url):
    """Return True once *jenkins_url* answers a HEAD request with a 2xx.

    Retries up to 5 times with a 5 second pause between attempts. A falsy
    URL (None/empty) returns False immediately.
    """
    if not jenkins_url:
        return False
    RETRY_COUNT = 5
    RETRY_INTERVAL = 5  # seconds
    while RETRY_COUNT > 0:
        try:
            logging.debug("Checking jenkins: %s, tries left: %s" % (jenkins_url, str(RETRY_COUNT)))
            response = requests.head(jenkins_url)
            # BUG FIX: the old check (status_code % 200 < 100) also accepted
            # 4xx responses (e.g. 404 % 200 == 4). Accept only 2xx.
            if 200 <= response.status_code < 300:
                return True
        except Exception:  # narrowed from a bare except (which also caught KeyboardInterrupt)
            logging.error('Timeout for: %s | count: %s' % (jenkins_url, str(RETRY_COUNT)))
        time.sleep(RETRY_INTERVAL)
        RETRY_COUNT = RETRY_COUNT - 1
    return False
def get_host_port(master_name):
    """Look up the Marathon app *master_name* and build its Jenkins URL.

    Returns "http://<host>:<port>" taken from the app's first task, or
    None when the app is unknown or has no running tasks yet.
    """
    client = marathon.Marathon(MARATHON_URL)
    app_info = client.getApp(master_name)
    if not app_info:
        return None
    tasks = app_info['app']['tasks']
    if not tasks:
        return None
    first_task = tasks[0]
    return "http://" + first_task['host'] + ":" + str(first_task['ports'][0])
def create_jenkins_master(master_name, jobConfig):
    """Ask Marathon to launch a new Jenkins master app named *master_name*.

    The app JSON fixture is a template with two %s slots for the app name.
    The returned dict's 'url' is always None here; poll() resolves it once
    Marathon has scheduled the task.
    NOTE(review): jobConfig is unused -- presumably kept so initialize()
    can bind it via functools.partial; confirm.
    """
    logging.debug('Creating jenkins master: %s' %(master_name))
    app_json = load_fixture(FIXTURES_DIR+APP_FIXTURES+JENKINS_FIXTURE)
    app_json = app_json % (master_name, master_name)
    mclient = marathon.Marathon(MARATHON_URL)
    status = mclient.createApp(app_json)  # NOTE(review): result ignored; failures surface later in poll()
    # time.sleep(120)
    jenkins_url = None
    return {'name': master_name, 'url': jenkins_url}
def poll(master):
    """Poll Marathon until the master's host:port is known.

    Tries up to 20 times with a 5 second pause between attempts and
    returns a {'name': ..., 'url': ...} dict; 'url' stays None when the
    task never showed up.
    """
    master_name = master['name']
    jenkins_url = None
    max_attempts = 20
    for i in range(max_attempts):
        jenkins_url = get_host_port(master_name)
        if jenkins_url:
            break
        time.sleep(5)
    logging.debug('Attempt [%s] | master: %s | url: %s' % (i, master_name, jenkins_url))
    return {'name': master_name, 'url': jenkins_url}
def create_jenkins_job(job_name, master_url, job_config_url):
    """Create job *job_name* on the Jenkins master at *master_url*.

    The job definition XML is read from the fixture named *job_config_url*
    and POSTed to Jenkins' createItem endpoint. Returns the new job's URL
    on success, or None when the fixture is empty/missing or the POST
    fails.
    """
    logging.debug('Creating jenkins job: %s with job config: %s on master: %s' % (job_name, job_config_url, master_url))
    config = load_fixture(FIXTURES_DIR+JOB_FIXTURES+job_config_url)
    if not config:
        return None
    logging.debug(config)
    headers = {'content-type': 'application/xml', 'accept':'application/xml'}
    post_url = str(master_url)+"/createItem?name="+str(job_name)
    try:
        r = requests.post(post_url, data=config, headers=headers)
    # BUG FIX: `except Exception, err` is Python-2-only syntax; the
    # `as` form works on Python 2.6+ and 3.
    except Exception:
        logging.exception('Error in create_jenkins_job()')
        return None
    # BUG FIX: `r.status_code % 200 < 100` also matched 4xx codes
    # (404 % 200 == 4); only 2xx responses indicate success.
    if 200 <= r.status_code < 300:
        return master_url+'/job/'+job_name
    return None
def create_jenkins_jobs(master):
    """Create NUM_JENKINS_JOBS jobs on one polled master.

    Returns None when the master is unreachable; otherwise a dict holding
    the master's name, url and the list of created job URLs (entries are
    None for jobs whose creation failed).
    """
    name = master['name']
    url = master['url']
    if not check_jenkins(url):
        return None
    jobs = [
        create_jenkins_job(JENKINS_JOB_NAME + str(i),
                           master_url=url,
                           job_config_url=JENKINS_JOB_CONFIG)
        for i in range(NUM_JENKINS_JOBS)
    ]
    return {'name': name, 'url': url, 'jobs': jobs}
def trigger_build(job_url):
    """POST to a job's /build endpoint to queue a run.

    Returns True when Jenkins answers with a 2xx or 3xx status (it
    typically replies 201 or with a redirect), False otherwise.
    """
    logging.debug('Triggering build for job: %s' % (job_url))
    headers = {'content-type': 'application/xml', 'accept':'application/xml'}
    build_url = job_url+"/build"
    r = requests.post(build_url, headers=headers)
    # Give Jenkins a moment to register the queued build.
    time.sleep(1)
    # BUG FIX: the modulo checks (% 200 / % 300) also matched 4xx/5xx
    # codes (e.g. 404 % 200 == 4). Accept any 2xx or 3xx status.
    return 200 <= r.status_code < 400
def delete_jenkins_master(master_name):
    """Tear down the Marathon app backing the named Jenkins master."""
    logging.debug('Deleting jenkins master: %s' % (master_name))
    client = marathon.Marathon(MARATHON_URL)
    client.deleteApp(master_name)
def initialize(driverConfig, config, num_apps):
    """Spin up *num_apps* Jenkins masters and create jobs on each.

    Work is fanned out over a multiprocessing pool: create the Marathon
    apps, give Marathon time to schedule them, poll each one for its
    host:port, then create the configured jobs. Every successfully created
    master and job is recorded in *config* (via addMaster/addJob).
    driverConfig is unused; kept for a uniform command signature. Errors
    are logged, not raised.
    """
    try:
        pool = multiprocessing.Pool(WORKERS)
        # Create jenkins masters, one uniquely-named Marathon app each.
        master_names = [APP_PREFIX + str(uuid.uuid4()) for i in range(num_apps)]
        created_masters = pool.map(functools.partial(create_jenkins_master, jobConfig=config), master_names)
        logging.debug("Created masters: %s" % (created_masters))
        # Give marathon some time, before polling for host:port
        time.sleep(60)
        polled_masters = pool.map(poll, created_masters)
        logging.debug("Polled masters: %s" % (polled_masters))
        # Creating jobs
        created_jobs = pool.map(create_jenkins_jobs, polled_masters)
        logging.debug("Created Jobs: %s" % (created_jobs))
        # Record every reachable master and its jobs in the shared config.
        for created_job in created_jobs:
            if created_job:
                name = created_job['name']
                url = created_job['url']
                jobs = created_job['jobs']
                config.addMaster(name, url)
                for job in jobs:
                    config.addJob(name, job)
    # BUG FIX: `except Exception, err` is Python-2-only syntax; also
    # replaced `xrange` above with `range` (identical results here).
    except Exception:
        logging.exception('Error in initialize()')
def trigger(driverConfig, jobConfig):
    """Fire NUM_JOBS_TO_TRIGGER rounds of builds for every known job.

    Builds one async POST per job across all masters, repeats the batch
    NUM_JOBS_TO_TRIGGER times and sends them concurrently with grequests.
    driverConfig is unused; kept for a uniform command signature.
    """
    jobs_list_list = jobConfig.getJobs()
    headers = {'content-type': 'application/xml', 'accept':'application/xml'}
    # NOTE(review): the `*` repeats the *same* grequests request objects;
    # confirm grequests re-sends a request object on each map() pass.
    jobs_to_trigger = [grequests.post(job+'/build', headers=headers) for job_list in jobs_list_list.values() for job in job_list] * NUM_JOBS_TO_TRIGGER
    grequests.map(jobs_to_trigger)
def cleanup(driverConfig, jobConfig, jobConfigFile):
    """Delete every Jenkins master we created and remove the state file.

    driverConfig is unused; kept for a uniform command signature.
    """
    for master_name in jobConfig.getMasterNames():
        delete_jenkins_master(master_name)
    os.remove(jobConfigFile)
def save_config(driverConfig, config_file, config):
    """Pickle *config* to *config_file* (binary mode).

    driverConfig is unused; kept for a uniform command signature.
    """
    with open(config_file, 'wb') as out_handle:
        pickle.dump(config, out_handle)
def load_config(config_file):
    """Unpickle and return the driver state stored in *config_file*.

    BUG FIX: the file is now opened in binary mode. Pickle data is bytes;
    reading it through a text-mode handle ('r') corrupts it on Python 3
    and only worked by accident with protocol-0 pickles on Python 2.
    """
    with open(config_file, 'rb') as fd:
        return pickle.load(fd)
def load_fixture(fixture_path):
    """Return the full text of the fixture file at *fixture_path*."""
    logging.debug('Loading fixture: %s' % (fixture_path))
    with open(fixture_path, "r") as handle:
        return handle.read()
# Collect statistics
def captureClusterStatus(driverConfig, config):
    """Fan captureStatus() out over every master recorded in *config*.

    driverConfig is unused; kept for a uniform command signature. Returns
    the list of per-master status dicts.
    """
    mastersConfig = config.getMasters()
    masters = [{'name': key, 'url': value} for key, value in mastersConfig.items()]
    logging.debug('Capturing statuses for masters: %s' % masters)
    worker_pool = multiprocessing.Pool(WORKERS)
    return worker_pool.map(captureStatus, masters)
def captureStatus(master):
    """Collect per-build status info from one Jenkins master.

    master is a {'name': ..., 'url': ...} dict. Returns a dict of the form
    {'name', 'url', 'jobs': [{'job', 'builds': [{'buildId', 'duration',
    'status'}]}]} with buildId and duration stringified.
    """
    name = master['name']
    url = master['url']
    j = jenkinsapi.jenkins.Jenkins(url)
    # Get jobs
    jobs = j.keys()
    jobStatus = {'name':name, 'url':url, 'jobs': []}
    if jobs:
        # Get build status and times
        for job in jobs:
            buildIds = j[job].get_build_ids()
            builds = []
            for buildId in buildIds:
                build = j[job].get_build(int(buildId))
                # NOTE(review): reaches into jenkinsapi's private _data dict;
                # presumably 'duration' is milliseconds -- confirm.
                duration = build._data['duration']
                status = build.get_status()
                buildStatus = {'buildId':str(buildId), 'duration':str(duration), 'status': status}
                builds.append(buildStatus)
            jobStatus['jobs'].append({'job':job, 'builds':builds})
    logging.debug(jobStatus)
    return jobStatus
def flattenStats(driverConfig, stats):
    """Flatten nested master/job/build status dicts into tuples.

    Each element of *stats* is a dict with 'url' and 'jobs'; each job
    holds a 'job' name and a 'builds' list (see captureStatus). Returns a
    flat list of (build_url, status, duration) tuples, with status
    stringified. driverConfig is unused; kept for a uniform command
    signature.
    """
    return [
        (entry['url'] + '/job/' + job['job'] + '/' + build['buildId'],
         str(build['status']),
         build['duration'])
        for entry in stats
        for job in entry['jobs']
        for build in job['builds']
    ]
def getAllJobs():
    """Print the build status of every app currently known to Marathon."""
    mclient = marathon.Marathon(MARATHON_URL)
    apps = mclient.getApps()
    for app in apps['apps']:
        a = mclient.getApp(app['id'])
        host = a['app']['tasks'][0]['host']
        port = a['app']['ports'][0]
        url = 'http://' + host + ':' + str(port)
        # BUG FIX: the original passed the builtin function `id` as the
        # master name; use the Marathon app id instead. Also use print()
        # as a function (works on Python 2 and 3 for one argument).
        master = {'name': app['id'], 'url': url}
        print(captureStatus(master))
# Script entry point: dump build statuses for every Marathon app.
if __name__ == '__main__':
    getAllJobs()
| |
import copy
import glob
import itertools
import os
import uuid
from typing import Dict, List, Optional, Union
import warnings
from ray.tune.error import TuneError
from ray.tune.experiment import Experiment, convert_to_experiment_list
from ray.tune.config_parser import make_parser, create_trial_from_spec
from ray.tune.suggest.variant_generator import (
count_variants, count_spec_samples, generate_variants, format_vars,
flatten_resolved_vars, get_preset_variants)
from ray.tune.suggest.search import SearchAlgorithm
from ray.tune.utils.util import atomic_save, load_newest_checkpoint
# NOTE(review): presumably the variant-count limit above which search
# spaces are evaluated lazily (and thus cannot be serialized) -- confirm
# against the code that reads this constant.
SERIALIZATION_THRESHOLD = 1e6
class _VariantIterator:
"""Iterates over generated variants from the search space.
This object also toggles between lazy evaluation and
eager evaluation of samples. If lazy evaluation is enabled,
this object cannot be serialized.
"""
def __init__(self, iterable, lazy_eval=False):
self.lazy_eval = lazy_eval
self.iterable = iterable
self._has_next = True
if lazy_eval:
self._load_value()
else:
self.iterable = list(iterable)
self._has_next = bool(self.iterable)
def _load_value(self):
try:
self.next_value = next(self.iterable)
except StopIteration:
self._has_next = False
def has_next(self):
return self._has_next
def __next__(self):
if self.lazy_eval:
current_value = self.next_value
self._load_value()
return current_value
current_value = self.iterable.pop(0)
self._has_next = bool(self.iterable)
return current_value
class _TrialIterator:
    """Generates trials from the spec.

    Args:
        uuid_prefix (str): Used in creating the trial name.
        num_samples (int): Number of samples from distribution
            (same as tune.run).
        unresolved_spec (dict): Experiment specification
            that might have unresolved distributions.
        constant_grid_search (bool): Should random variables be sampled
            first before iterating over grid variants (True) or not (False).
        output_path (str): A specific output path within the local_dir.
        points_to_evaluate (list): Same as tune.run.
        lazy_eval (bool): Whether variants should be generated
            lazily or eagerly. This is toggled depending
            on the size of the grid search.
        start (int): index at which to start counting trials.
    """

    def __init__(self,
                 uuid_prefix: str,
                 num_samples: int,
                 unresolved_spec: dict,
                 constant_grid_search: bool = False,
                 output_path: str = "",
                 points_to_evaluate: Optional[List] = None,
                 lazy_eval: bool = False,
                 start: int = 0):
        self.parser = make_parser()
        self.num_samples = num_samples
        self.uuid_prefix = uuid_prefix
        # Countdown of samples still to generate; decremented once per
        # sample (preset point or random draw) in __next__.
        self.num_samples_left = num_samples
        self.unresolved_spec = unresolved_spec
        self.constant_grid_search = constant_grid_search
        self.output_path = output_path
        self.points_to_evaluate = points_to_evaluate or []
        self.num_points_to_evaluate = len(self.points_to_evaluate)
        self.counter = start
        self.lazy_eval = lazy_eval
        # Current _VariantIterator; created lazily in __next__.
        self.variants = None

    def create_trial(self, resolved_vars, spec):
        # Trial ids look like "<uuid_prefix>00042".
        trial_id = self.uuid_prefix + ("%05d" % self.counter)
        experiment_tag = str(self.counter)
        # Always append resolved vars to experiment tag?
        if resolved_vars:
            experiment_tag += "_{}".format(format_vars(resolved_vars))
        self.counter += 1
        return create_trial_from_spec(
            spec,
            self.output_path,
            self.parser,
            evaluated_params=flatten_resolved_vars(resolved_vars),
            trial_id=trial_id,
            experiment_tag=experiment_tag)

    def __next__(self):
        """Generates Trial objects with the variant generation process.

        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.

        See also: `ray.tune.suggest.variant_generator`.

        Returns:
            Trial object
        """
        if "run" not in self.unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(
                self.unresolved_spec))
        if self.variants and self.variants.has_next():
            # This block will be skipped upon instantiation.
            # `variants` will be set later after the first loop.
            resolved_vars, spec = next(self.variants)
            return self.create_trial(resolved_vars, spec)
        if self.points_to_evaluate:
            # Seed the next sample with a user-provided configuration.
            config = self.points_to_evaluate.pop(0)
            self.num_samples_left -= 1
            self.variants = _VariantIterator(
                get_preset_variants(
                    self.unresolved_spec,
                    config,
                    constant_grid_search=self.constant_grid_search),
                lazy_eval=self.lazy_eval)
            resolved_vars, spec = next(self.variants)
            return self.create_trial(resolved_vars, spec)
        elif self.num_samples_left > 0:
            # Plain sample: generate variants from the unresolved spec.
            self.variants = _VariantIterator(
                generate_variants(
                    self.unresolved_spec,
                    constant_grid_search=self.constant_grid_search),
                lazy_eval=self.lazy_eval)
            self.num_samples_left -= 1
            resolved_vars, spec = next(self.variants)
            return self.create_trial(resolved_vars, spec)
        else:
            raise StopIteration

    def __iter__(self):
        return self
class BasicVariantGenerator(SearchAlgorithm):
    """Uses Tune's variant generation for resolving variables.

    This is the default search algorithm used if no other search algorithm
    is specified.

    Args:
        points_to_evaluate (list): Initial parameter suggestions to be run
            first. This is for when you already have some good parameters
            you want to run first to help the algorithm make better suggestions
            for future parameters. Needs to be a list of dicts containing the
            configurations.
        max_concurrent (int): Maximum number of concurrently running trials.
            If 0 (default), no maximum is enforced.
        constant_grid_search (bool): If this is set to ``True``, Ray Tune will
            *first* try to sample random values and keep them constant over
            grid search parameters. If this is set to ``False`` (default),
            Ray Tune will sample new random parameters in each grid search
            condition.

    Example:

    .. code-block:: python

        from ray import tune

        # This will automatically use the `BasicVariantGenerator`
        tune.run(
            lambda config: config["a"] + config["b"],
            config={
                "a": tune.grid_search([1, 2]),
                "b": tune.randint(0, 3)
            },
            num_samples=4)

    In the example above, 8 trials will be generated: For each sample
    (``4``), each of the grid search variants for ``a`` will be sampled
    once. The ``b`` parameter will be sampled randomly.

    The generator accepts a pre-set list of points that should be evaluated.
    The points will replace the first samples of each experiment passed to
    the ``BasicVariantGenerator``.

    Each point will replace one sample of the specified ``num_samples``. If
    grid search variables are overwritten with the values specified in the
    presets, the number of samples will thus be reduced.

    Example:

    .. code-block:: python

        from ray import tune
        from ray.tune.suggest.basic_variant import BasicVariantGenerator

        tune.run(
            lambda config: config["a"] + config["b"],
            config={
                "a": tune.grid_search([1, 2]),
                "b": tune.randint(0, 3)
            },
            search_alg=BasicVariantGenerator(points_to_evaluate=[
                {"a": 2, "b": 2},
                {"a": 1},
                {"b": 2}
            ]),
            num_samples=4)

    The example above will produce six trials via four samples:

    - The first sample will produce one trial with ``a=2`` and ``b=2``.
    - The second sample will produce one trial with ``a=1`` and ``b`` sampled
      randomly
    - The third sample will produce two trials, one for each grid search
      value of ``a``. It will be ``b=2`` for both of these trials.
    - The fourth sample will produce two trials, one for each grid search
      value of ``a``. ``b`` will be sampled randomly and independently for
      both of these trials.
    """
    # Checkpoint file name template; ``{}`` is filled with the session string.
    CKPT_FILE_TMPL = "basic-variant-state-{}.json"

    def __init__(self,
                 points_to_evaluate: Optional[List[Dict]] = None,
                 max_concurrent: int = 0,
                 constant_grid_search: bool = False):
        # Chained iterator over the per-experiment trial iterators.
        self._trial_generator = []
        self._iterators = []
        self._trial_iter = None
        self._finished = False
        self._points_to_evaluate = points_to_evaluate or []
        # Unique prefix for all trials generated, e.g., trial ids start as
        # 2f1ef_00001, 2f1ef_00002, 2f1ef_00003, etc. Overridable for testing.
        force_test_uuid = os.environ.get("_TEST_TUNE_TRIAL_UUID")
        if force_test_uuid:
            self._uuid_prefix = force_test_uuid + "_"
        else:
            self._uuid_prefix = str(uuid.uuid1().hex)[:5] + "_"
        self._total_samples = 0
        self.max_concurrent = max_concurrent
        self._constant_grid_search = constant_grid_search
        # Trial ids currently running; used to enforce ``max_concurrent``.
        self._live_trials = set()

    @property
    def total_samples(self):
        """Total number of samples across all added experiments."""
        return self._total_samples

    def add_configurations(
            self,
            experiments: Union[Experiment, List[Experiment], Dict[str, Dict]]):
        """Chains generator given experiment specifications.

        Arguments:
            experiments (Experiment | list | dict): Experiments to run.
        """
        experiment_list = convert_to_experiment_list(experiments)
        for experiment in experiment_list:
            grid_vals = count_spec_samples(experiment.spec, num_samples=1)
            # Very large grids are generated lazily; lazy iterators cannot
            # be serialized, which disables checkpoint/resume (see warning).
            lazy_eval = grid_vals > SERIALIZATION_THRESHOLD
            if lazy_eval:
                warnings.warn(
                    f"The number of pre-generated samples ({grid_vals}) "
                    "exceeds the serialization threshold "
                    f"({int(SERIALIZATION_THRESHOLD)}). Resume ability is "
                    "disabled. To fix this, reduce the number of "
                    "dimensions/size of the provided grid search.")
            previous_samples = self._total_samples
            points_to_evaluate = copy.deepcopy(self._points_to_evaluate)
            self._total_samples += count_variants(experiment.spec,
                                                  points_to_evaluate)
            iterator = _TrialIterator(
                uuid_prefix=self._uuid_prefix,
                num_samples=experiment.spec.get("num_samples", 1),
                unresolved_spec=experiment.spec,
                constant_grid_search=self._constant_grid_search,
                output_path=experiment.dir_name,
                points_to_evaluate=points_to_evaluate,
                lazy_eval=lazy_eval,
                start=previous_samples)
            self._iterators.append(iterator)
            self._trial_generator = itertools.chain(self._trial_generator,
                                                    iterator)

    def next_trial(self):
        """Provides one Trial object to be queued into the TrialRunner.

        Returns:
            Trial: Returns a single trial.
        """
        # Enforce the concurrency cap before drawing a new trial.
        if self.max_concurrent > 0 and len(
                self._live_trials) >= self.max_concurrent:
            return None
        if not self._trial_iter:
            self._trial_iter = iter(self._trial_generator)
        try:
            trial = next(self._trial_iter)
            self._live_trials.add(trial.trial_id)
            return trial
        except StopIteration:
            # All iterators exhausted: reset and mark the search finished.
            self._trial_generator = []
            self._trial_iter = None
            self.set_finished()
            return None

    def on_trial_complete(self,
                          trial_id: str,
                          result: Optional[Dict] = None,
                          error: bool = False):
        """Free a concurrency slot when a trial completes (or errors)."""
        if trial_id in self._live_trials:
            self._live_trials.remove(trial_id)

    def get_state(self):
        """Return a serializable state dict, or ``False`` if not possible.

        Lazily evaluated iterators cannot be serialized, so ``False`` is
        returned in that case to signal that checkpointing is unsupported.
        """
        if any(iterator.lazy_eval for iterator in self._iterators):
            return False
        state = self.__dict__.copy()
        # The chained generator is not picklable; it is rebuilt in set_state.
        del state["_trial_generator"]
        return state

    def set_state(self, state):
        """Restore state from ``get_state`` and rebuild the trial generator."""
        self.__dict__.update(state)
        for iterator in self._iterators:
            self._trial_generator = itertools.chain(self._trial_generator,
                                                    iterator)

    def save_to_dir(self, dirpath, session_str):
        """Atomically save generator state to ``dirpath``.

        Returns ``False`` without saving when any iterator is lazily
        evaluated (state is not serializable in that case).
        """
        if any(iterator.lazy_eval for iterator in self._iterators):
            return False
        state_dict = self.get_state()
        atomic_save(
            state=state_dict,
            checkpoint_dir=dirpath,
            file_name=self.CKPT_FILE_TMPL.format(session_str),
            tmp_file_name=".tmp_generator")

    def has_checkpoint(self, dirpath: str):
        """Whether a checkpoint file exists within dirpath."""
        return bool(
            glob.glob(os.path.join(dirpath, self.CKPT_FILE_TMPL.format("*"))))

    def restore_from_dir(self, dirpath: str):
        """Restores self + searcher + search wrappers from dirpath."""
        state_dict = load_newest_checkpoint(dirpath,
                                            self.CKPT_FILE_TMPL.format("*"))
        if not state_dict:
            raise RuntimeError(
                "Unable to find checkpoint in {}.".format(dirpath))
        self.set_state(state_dict)
| |
#!/usr/bin/env python
"""Automatically install required tools and data to run bcbio-nextgen pipelines.
This automates the steps required for installation and setup to make it
easier to get started with bcbio-nextgen. The defaults provide data files
for human variant calling.
Requires: git, wget, bzip2, Python 3.x, Python 2.7 or argparse + Python 2.6 and earlier
"""
from __future__ import print_function
import collections
import contextlib
import datetime
import os
import platform
import shutil
import subprocess
import sys
try:
import urllib2 as urllib_request
except ImportError:
import urllib.request as urllib_request
# Remote resources fetched during installation.
REMOTES = {
    "requirements": "https://raw.githubusercontent.com/bcbio/bcbio-nextgen/master/requirements-conda.txt",
    "gitrepo": "https://github.com/bcbio/bcbio-nextgen.git",
    "system_config": "https://raw.githubusercontent.com/bcbio/bcbio-nextgen/master/config/bcbio_system.yaml",
    # %s is filled with "MacOSX" or "Linux" (see install_anaconda_python).
    "anaconda": "https://repo.continuum.io/miniconda/Miniconda3-latest-%s-x86_64.sh"}
# Python version pin passed to conda/mamba when installing packages.
TARGETPY = "python=3.6"
def main(args, sys_argv):
    """Top-level installer driver.

    Validates arguments and dependencies, installs an isolated Anaconda
    python plus the bcbio-nextgen conda packages, writes the system
    configuration, then delegates data/tool installation to the freshly
    installed ``bcbio_nextgen.py upgrade``.

    Args:
        args: parsed argparse namespace.
        sys_argv: raw command line (minus program name), forwarded to upgrade.
    """
    check_arguments(args)
    check_dependencies()
    # All downloads/builds happen in a throwaway work directory.
    with bcbio_tmpdir():
        setup_data_dir(args)
        print("Installing isolated base python installation")
        anaconda = install_anaconda_python(args)
        print("Installing mamba")
        anaconda = install_mamba(anaconda, args)
        print("Installing conda-build")
        install_conda_build(anaconda, args)
        print("Installing bcbio-nextgen")
        bcbio = install_conda_pkgs(anaconda, args)
        bootstrap_bcbionextgen(anaconda, args)
        print("Installing data and third party dependencies")
        system_config = write_system_config(REMOTES["system_config"], args.datadir,
                                            args.tooldir)
        setup_manifest(args.datadir)
        # The installed bcbio handles genome data and third-party tools itself.
        subprocess.check_call([bcbio, "upgrade"] + _clean_args(sys_argv, args))
    print("Finished: bcbio-nextgen, tools and data installed")
    print(" Genome data installed in:\n %s" % args.datadir)
    if args.tooldir:
        print(" Tools installed in:\n %s" % args.tooldir)
    print(" Ready to use system configuration at:\n %s" % system_config)
    print(" Edit configuration file as needed to match your machine or cluster")
def _clean_args(sys_argv, args):
"""Remove data directory from arguments to pass to upgrade function.
"""
base = [x for x in sys_argv if
x.startswith("-") or not args.datadir == os.path.abspath(os.path.expanduser(x))]
# Remove installer only options we don't pass on
base = [x for x in base if x not in set(["--minimize-disk"])]
if "--nodata" in base:
base.remove("--nodata")
else:
base.append("--data")
return base
def bootstrap_bcbionextgen(anaconda, args):
    """Pip-install the development version of bcbio-nextgen from git.

    No-op unless ``--upgrade development`` was requested. ``--revision``
    selects a specific tag/commit; ``master`` means the default branch.
    """
    if args.upgrade != "development":
        return
    git_tag = "" if args.revision == "master" else "@%s" % args.revision
    subprocess.check_call([anaconda["pip"], "install", "--upgrade", "--no-deps",
                           "git+%s%s#egg=bcbio-nextgen" % (REMOTES["gitrepo"], git_tag)])
def _get_conda_channels(conda_bin):
    """Return ``-c <channel>`` arguments for conda/mamba invocations.

    Channels already configured in the user's .condarc (possibly as mirror
    URLs ending in the channel name) are skipped so user overrides win.
    Falls back to adding all defaults when PyYAML is unavailable.
    """
    wanted = ["bioconda", "conda-forge"]
    try:
        import yaml
        config = yaml.safe_load(subprocess.check_output([conda_bin, "config", "--show"]))
    except ImportError:
        config = {}
    existing = config.get("channels") or []
    out = []
    for channel in wanted:
        already = any(cur.endswith((channel, "%s/" % channel)) for cur in existing)
        if not already:
            out.extend(["-c", channel])
    return out
def install_mamba(anaconda, args):
    """Install mamba into the isolated Anaconda environment.

    Returns the ``anaconda`` dict with a ``"mamba"`` entry pointing at the
    installed executable.
    """
    bindir = os.path.join(args.datadir, "anaconda", "bin")
    subprocess.check_call(
        [anaconda["conda"], "install", "--yes"] +
        _get_conda_channels(anaconda["conda"]) + ["mamba"])
    anaconda["mamba"] = os.path.join(bindir, "mamba")
    return anaconda
def install_conda_build(anaconda, args):
    """Install conda-build into the isolated Anaconda environment via mamba.

    Args:
        anaconda: dict with paths to the ``conda`` and ``mamba`` executables.
        args: parsed command-line arguments (unused; kept for interface
            parity with the other install_* helpers).
    """
    # The original computed anaconda_dir/bindir/mamba locals from
    # args.datadir but never used them; the call relies on anaconda["mamba"].
    subprocess.check_call(
        [anaconda["mamba"], "install", "--yes"] +
        _get_conda_channels(anaconda["conda"]) + ["conda-build"])
def install_conda_pkgs(anaconda, args):
    """Install bcbio-nextgen's conda dependencies into the isolated environment.

    Returns:
        Path to the installed ``bcbio_nextgen.py`` entry point.
    """
    env = dict(os.environ)
    # Pin pkgs/envs directories inside the install to avoid user specific
    # pkgs and envs directories: https://github.com/conda/conda/issues/6748
    env["CONDA_PKGS_DIRS"] = os.path.join(anaconda["dir"], "pkgs")
    env["CONDA_ENVS_DIRS"] = os.path.join(anaconda["dir"], "envs")
    conda_bin = anaconda["conda"]
    # Prefer mamba for the heavy dependency solve when it is available.
    mamba_bin = anaconda["mamba"] if "mamba" in anaconda else conda_bin
    req_file = os.path.basename(REMOTES["requirements"])
    if not os.path.exists(req_file):
        subprocess.check_call(["wget", "--no-check-certificate", REMOTES["requirements"]])
    if args.minimize_disk:
        subprocess.check_call([mamba_bin, "install", "--yes", "nomkl"], env=env)
    channels = _get_conda_channels(conda_bin)
    # First pull in dependencies, then pin exact versions from the
    # requirements file.
    subprocess.check_call([mamba_bin, "install", "--yes"] + channels +
                          ["--only-deps", "bcbio-nextgen", TARGETPY], env=env)
    subprocess.check_call([conda_bin, "install", "--yes"] + channels +
                          ["--file", req_file, TARGETPY], env=env)
    return os.path.join(anaconda["dir"], "bin", "bcbio_nextgen.py")
def _guess_distribution():
"""Simple approach to identify if we are on a MacOSX or Linux system for Anaconda.
"""
if platform.mac_ver()[0]:
return "macosx"
else:
return "linux"
def install_anaconda_python(args):
    """Provide isolated installation of Anaconda python for running bcbio-nextgen.

    http://docs.continuum.io/anaconda/index.html

    Returns:
        dict with the ``conda`` and ``pip`` executable paths and the install
        directory.
    """
    anaconda_dir = os.path.join(args.datadir, "anaconda")
    bindir = os.path.join(anaconda_dir, "bin")
    conda = os.path.join(bindir, "conda")
    if not os.path.exists(anaconda_dir) or not os.path.exists(conda):
        # A directory without a conda binary is a broken install; start over.
        if os.path.exists(anaconda_dir):
            shutil.rmtree(anaconda_dir)
        dist = args.distribution or _guess_distribution()
        url = REMOTES["anaconda"] % ("MacOSX" if dist.lower() == "macosx" else "Linux")
        installer = os.path.basename(url)
        if not os.path.exists(installer):
            subprocess.check_call(["wget", "--progress=dot:mega", "--no-check-certificate", url])
        subprocess.check_call("bash %s -b -p %s" %
                              (installer, anaconda_dir), shell=True)
    return {"conda": conda,
            "pip": os.path.join(bindir, "pip"),
            "dir": anaconda_dir}
def setup_manifest(datadir):
    """Create a barebones manifest directory, filled in during upgrade."""
    target = os.path.join(datadir, "manifest")
    if not os.path.exists(target):
        os.makedirs(target)
def write_system_config(base_url, datadir, tooldir):
    """Write a bcbio_system.yaml configuration file with tool information.

    Downloads the template from ``base_url`` and rewrites program ``dir:``
    entries in the ``resources`` section to point into the tool install
    directory. An existing file is kept untouched when no tooldir is given,
    otherwise it is backed up before being overwritten.

    Returns:
        Path to the written configuration file.
    """
    out_file = os.path.join(datadir, "galaxy", os.path.basename(base_url))
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))
    if os.path.exists(out_file):
        # if no tool directory and exists, do not overwrite
        if tooldir is None:
            return out_file
        else:
            # Keep a timestamped backup before rewriting.
            bak_file = out_file + ".bak%s" % (datetime.datetime.now().strftime("%Y%M%d_%H%M"))
            shutil.copy(out_file, bak_file)
    if tooldir:
        java_basedir = os.path.join(tooldir, "share", "java")
    # Program entries whose names start with these are never rewritten.
    rewrite_ignore = ("log",)
    with contextlib.closing(urllib_request.urlopen(base_url)) as in_handle:
        with open(out_file, "w") as out_handle:
            # Line-based YAML rewrite, tracking position via indentation:
            # column 0 = top-level section, 2 spaces = program name,
            # deeper = program attribute. Assumes the template uses 2-space
            # indentation throughout.
            in_resources = False
            in_prog = None
            for line in (l.decode("utf-8") for l in in_handle):
                if line[0] != " ":
                    in_resources = line.startswith("resources")
                    in_prog = None
                elif (in_resources and line[:2] == "  " and line[2] != " "
                      and not line.strip().startswith(rewrite_ignore)):
                    in_prog = line.split(":")[0].strip()
                # Update java directories to point to install directory, avoid special cases
                elif line.strip().startswith("dir:") and in_prog and in_prog not in ["log", "tmp"]:
                    final_dir = os.path.basename(line.split()[-1])
                    if tooldir:
                        line = "%s: %s\n" % (line.split(":")[0],
                                             os.path.join(java_basedir, final_dir))
                    in_prog = None
                elif line.startswith("galaxy"):
                    # Comment out galaxy settings; bcbio fills these in later.
                    line = "# %s" % line
                out_handle.write(line)
    return out_file
def setup_data_dir(args):
    """Create the genome data directory (and parents) if it does not exist.

    Uses ``os.makedirs`` instead of shelling out to ``mkdir -p``: portable,
    and avoids spawning a subprocess for a trivial filesystem operation.
    """
    if not os.path.exists(args.datadir):
        os.makedirs(args.datadir)
@contextlib.contextmanager
def bcbio_tmpdir():
    """Run the managed block inside a throwaway ``tmpbcbio-install`` directory.

    Yields the work directory path. The previous working directory is
    restored and the work directory removed even when the block raises
    (the original implementation skipped cleanup on error, leaking the
    directory and leaving the process chdir'ed into a removed-on-retry path).
    """
    orig_dir = os.getcwd()
    work_dir = os.path.join(os.getcwd(), "tmpbcbio-install")
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    os.chdir(work_dir)
    try:
        yield work_dir
    finally:
        os.chdir(orig_dir)
        shutil.rmtree(work_dir)
def check_arguments(args):
    """Ensure arguments are consistent and correct.

    Raises:
        argparse.ArgumentTypeError: if ``--toolplus`` is given without
            ``--tooldir``.
    """
    # Local import: at module level argparse is only imported under the
    # __main__ guard, so this function would hit a NameError when the module
    # is imported and called programmatically.
    import argparse
    if args.toolplus and not args.tooldir:
        raise argparse.ArgumentTypeError("Cannot specify --toolplus without --tooldir")
def check_dependencies():
    """Ensure required tools for installation are present.

    Raises:
        OSError: if git, wget or bzip2 cannot be found/run.
    """
    print("Checking required dependencies")
    for dep, msg in [(["git", "--version"], "Git (http://git-scm.com/)"),
                     (["wget", "--version"], "wget"),
                     (["bzip2", "-h"], "bzip2")]:
        try:
            p = subprocess.Popen(dep, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
            # Popen.communicate() returns (stdout, stderr) -- the original
            # unpacked the stderr slot (always None here) as the exit code,
            # so a 127 status was never detected. Use returncode instead,
            # which is set once communicate() has waited for the process.
            out, _ = p.communicate()
            code = p.returncode
        except OSError:
            out = "Executable not found"
            code = 127
        if code == 127:
            raise OSError("bcbio-nextgen installer requires %s\n%s" % (msg, out))
def _check_toolplus(x):
"""Parse options for adding non-standard/commercial tools like GATK and MuTecT.
"""
import argparse
Tool = collections.namedtuple("Tool", ["name", "fname"])
std_choices = set(["data", "dbnsfp", "ericscript"])
if x in std_choices:
return Tool(x, None)
elif "=" in x and len(x.split("=")) == 2:
name, fname = x.split("=")
fname = os.path.normpath(os.path.realpath(fname))
if not os.path.exists(fname):
raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s"
% (name, fname))
return Tool(name, fname)
else:
raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
if __name__ == "__main__":
    # argparse ships with Python >= 2.7; give earlier versions a clear error.
    try:
        import argparse
    except ImportError:
        raise ImportError("bcbio-nextgen installer requires `argparse`, included in Python 2.7.\n"
                          "Install for earlier versions with `pip install argparse` or "
                          "`easy_install argparse`.")
    parser = argparse.ArgumentParser(
        description="Automatic installation for bcbio-nextgen pipelines")
    # The single positional argument; normalized to an absolute path so it
    # can be matched and removed again in _clean_args.
    parser.add_argument("datadir", help="Directory to install genome data",
                        type=lambda x: (os.path.abspath(os.path.expanduser(x))))
    parser.add_argument("--cores", default=1,
                        help="Number of cores to use if local indexing is necessary.")
    parser.add_argument("--tooldir",
                        help="Directory to install 3rd party software tools. Leave unspecified for no tools",
                        type=lambda x: (os.path.abspath(os.path.expanduser(x))), default=None)
    parser.add_argument("--toolplus", help="Specify additional tool categories to install",
                        action="append", default=[], type=_check_toolplus)
    parser.add_argument("--datatarget", help="Data to install. Allows customization or install of extra data.",
                        action="append", default=[],
                        choices=["variation", "rnaseq", "smallrna", "gemini", "vep", "dbnsfp",
                                 "battenberg", "kraken", "ericscript", "gnomad"])
    parser.add_argument("--genomes", help="Genomes to download",
                        action="append", default=[],
                        choices=["GRCh37", "hg19", "hg38", "hg38-noalt", "mm10", "mm9", "rn6", "rn5",
                                 "canFam3", "dm3", "galGal4", "phix", "pseudomonas_aeruginosa_ucbpp_pa14",
                                 "sacCer3", "TAIR10", "WBcel235", "xenTro3", "GRCz10", "GRCz11",
                                 "Sscrofa11.1", "BDGP6"])
    parser.add_argument("--aligners", help="Aligner indexes to download",
                        action="append", default=[],
                        choices=["bbmap", "bowtie", "bowtie2", "bwa", "minimap2", "novoalign", "rtg", "snap",
                                 "star", "ucsc", "hisat2"])
    parser.add_argument("--nodata", help="Do not install data dependencies",
                        dest="install_data", action="store_false", default=True)
    parser.add_argument("--isolate", help="Created an isolated installation without PATH updates",
                        dest="isolate", action="store_true", default=False)
    parser.add_argument("--minimize-disk", help="Try to minimize disk usage (no MKL extensions)",
                        dest="minimize_disk", action="store_true", default=False)
    parser.add_argument("-u", "--upgrade", help="Code version to install",
                        choices=["stable", "development"], default="stable")
    parser.add_argument("--revision", help="Specify a git commit hash or tag to install", default="master")
    parser.add_argument("--distribution", help="Operating system distribution",
                        default="",
                        choices=["ubuntu", "debian", "centos", "scientificlinux", "macosx"])
    # With no arguments at all, show usage instead of erroring on datadir.
    if len(sys.argv) == 1:
        parser.print_help()
    else:
        main(parser.parse_args(), sys.argv[1:])
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.data.python.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest
def remove_variants(get_next_op):
  # TODO(b/72408568): Remove this once session.run can get variant tensors.
  """Replace variant-dtype tensors in a nest structure with empty tuples.

  sess.run cannot fetch DT_VARIANT tensors, so they are stripped before the
  structure is evaluated.
  """

  def _strip_variant(element):
    is_variant = (isinstance(element, ops.Tensor)
                  and element.dtype == dtypes.variant)
    return () if is_variant else element

  return nest.map_structure(_strip_variant, get_next_op)
class DatasetSerializationTestBase(test.TestCase):
"""Base class for testing serializable datasets."""
  def tearDown(self):
    """Remove checkpoint files written by the test before the next one runs."""
    self._delete_ckpt()
  # TODO(b/72657739): Remove sparse_tensor argument, which is to test the
  # (deprecated) saveable `SparseTensorSliceDataset`, once the API
  # `from_sparse_tensor_slices()` and related tests are deleted.
  def run_core_tests(self, ds_fn1, ds_fn2, num_outputs, sparse_tensors=False):
    """Runs the core tests.

    Args:
      ds_fn1: 0-argument function that returns a Dataset.
      ds_fn2: 0-argument function that returns a Dataset different from
        ds_fn1. If None, verify_restore_in_modified_graph test is not run.
      num_outputs: Total number of outputs expected from this Dataset.
      sparse_tensors: Whether dataset is built from SparseTensor(s).

    Raises:
      AssertionError if any test fails.
    """
    # Exercise each save/restore scenario in turn; ds_fn2 is only needed
    # for the modified-graph check at the end.
    self.verify_unused_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_fully_used_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_exhausted_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_init_before_restore(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_multiple_breaks(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_reset_restored_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_restore_in_empty_graph(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    if ds_fn2:
      self.verify_restore_in_modified_graph(
          ds_fn1, ds_fn2, num_outputs, sparse_tensors=sparse_tensors)
def verify_unused_iterator(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that saving and restoring an unused iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn, [0],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_fully_used_iterator(self, ds_fn, num_outputs,
sparse_tensors=False):
"""Verifies that saving and restoring a fully used iterator works.
Note that this only checks saving and restoring an iterator from which
`num_outputs` items have been produced but does not check for an
exhausted iterator, i.e., one from which an OutOfRange error has been
returned.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if test fails.
"""
self.verify_run_with_breaks(
ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):
"""Verifies that saving and restoring an exhausted iterator works.
An exhausted iterator is one which has returned an OutOfRange error.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
self.gen_outputs(
ds_fn, [],
num_outputs,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
actual = self.gen_outputs(
ds_fn, [],
0,
ckpt_saved=True,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
self.assertEqual(len(actual), 0)
def verify_init_before_restore(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that restoring into an already initialized iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs),
num_outputs,
init_before_restore=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_multiple_breaks(self,
ds_fn,
num_outputs,
num_breaks=10,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to save/restore at multiple break points.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
num_breaks: The number of break points. These are uniformly spread in
[0, num_outputs] both inclusive.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs, num_breaks),
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
  def verify_reset_restored_iterator(self,
                                     ds_fn,
                                     num_outputs,
                                     break_point=None,
                                     sparse_tensors=False,
                                     verify_exhausted=True):
    """Attempts to re-initialize a restored iterator.

    This is useful when restoring a training checkpoint during validation.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      break_point: Break point. Optional. Defaults to num_outputs/2.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    break_point = num_outputs // 2 if not break_point else break_point

    # Collect ground truth containing all outputs.
    expected = self.gen_outputs(
        ds_fn, [],
        num_outputs,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)
    # Skip some items and save checkpoint.
    self.gen_outputs(
        ds_fn, [],
        break_point,
        sparse_tensors=sparse_tensors,
        verify_exhausted=False)
    actual = []
    # Restore from checkpoint and then run init_op.
    with ops.Graph().as_default() as g:
      saver = self._import_meta_graph()
      init_op, get_next_op = self._get_iterator_ops_from_collection(
          ds_fn, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      with self.test_session(graph=g) as sess:
        self._restore(saver, sess)
        # Re-initializing after the restore should reset the iterator to the
        # beginning, so all `num_outputs` elements are produced again.
        self._initialize(init_op, sess)
        for _ in range(num_outputs):
          actual.append(sess.run(get_next_op))
        if verify_exhausted:
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)
    self.match(expected, actual)
  def verify_restore_in_modified_graph(self,
                                       ds_fn1,
                                       ds_fn2,
                                       num_outputs,
                                       break_point=None,
                                       sparse_tensors=False,
                                       verify_exhausted=True):
    """Attempts to restore an iterator in a modified graph.

    Builds an input pipeline using ds_fn1, runs it for `break_point` steps
    and saves a checkpoint. Then builds a new graph using ds_fn2, restores
    the checkpoint from ds_fn1 and verifies that the restore is successful.

    Args:
      ds_fn1: See `run_core_tests`.
      ds_fn2: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      break_point: Break point. Optional. Defaults to num_outputs/2.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    break_point = num_outputs // 2 if not break_point else break_point

    # Phase 1: run ds_fn1 for `break_point` items (checkpointing at the
    # end), then continue from that checkpoint to collect the remaining
    # items as the expected output.
    self.gen_outputs(
        ds_fn1, [],
        break_point,
        sparse_tensors=sparse_tensors,
        verify_exhausted=False)
    expected = self.gen_outputs(
        ds_fn1, [],
        num_outputs - break_point,
        ckpt_saved=True,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)

    # Phase 2: regenerate `break_point` items from a fresh ds_fn1 run so the
    # latest checkpoint again corresponds to the break point state.
    self.gen_outputs(
        ds_fn1, [],
        break_point,
        sparse_tensors=sparse_tensors,
        verify_exhausted=False)

    actual = []
    # Phase 3: build graph for ds_fn2 but load the checkpoint from ds_fn1.
    with ops.Graph().as_default() as g:
      _, get_next_op, saver = self._build_graph(
          ds_fn2, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      with self.test_session(graph=g) as sess:
        self._restore(saver, sess)
        for _ in range(num_outputs - break_point):
          actual.append(sess.run(get_next_op))
        if verify_exhausted:
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)
    self.match(expected, actual)
  def verify_restore_in_empty_graph(self,
                                    ds_fn,
                                    num_outputs,
                                    break_point=None,
                                    sparse_tensors=False,
                                    verify_exhausted=True):
    """Attempts to restore an iterator in an empty graph.

    Builds an input pipeline using ds_fn, runs it for `break_point` steps
    and saves a checkpoint. Then builds a new empty graph, restores
    the checkpoint from ds_fn and verifies that the restore is successful.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      break_point: Break point. Optional. Defaults to num_outputs/2.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    break_point = num_outputs // 2 if not break_point else break_point

    # Phase 1: run ds_fn for `break_point` items (checkpointing at the end),
    # then continue from that checkpoint to collect the remaining items as
    # the expected output.
    self.gen_outputs(
        ds_fn, [],
        break_point,
        sparse_tensors=sparse_tensors,
        verify_exhausted=False)
    expected = self.gen_outputs(
        ds_fn, [],
        num_outputs - break_point,
        ckpt_saved=True,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)

    # Phase 2: regenerate `break_point` items from a fresh run so the latest
    # checkpoint again corresponds to the break point state.
    self.gen_outputs(
        ds_fn, [],
        break_point,
        sparse_tensors=sparse_tensors,
        verify_exhausted=False)

    actual = []
    # Phase 3: build an iterator-only graph (no dataset) and load the
    # checkpoint for ds_fn into it.
    with ops.Graph().as_default() as g:
      get_next_op, saver = self._build_empty_graph(
          ds_fn, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      with self.test_session(graph=g) as sess:
        self._restore(saver, sess)
        for _ in range(num_outputs - break_point):
          actual.append(sess.run(get_next_op))
        if verify_exhausted:
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)
    self.match(expected, actual)
  def verify_error_on_save(self,
                           ds_fn,
                           num_outputs,
                           error,
                           break_point=None,
                           sparse_tensors=False):
    """Attempts to save a non-saveable iterator.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      error: Declared error when trying to save iterator.
      break_point: Break point. Optional. Defaults to num_outputs/2.
      sparse_tensors: See `run_core_tests`.

    Raises:
      AssertionError if any test fails.
    """
    break_point = num_outputs // 2 if not break_point else break_point
    with ops.Graph().as_default() as g:
      init_op, get_next_op, saver = self._build_graph(
          ds_fn, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      with self.test_session(graph=g) as sess:
        self._initialize(init_op, sess)
        # Produce a few items, then saving the iterator state must raise
        # the declared error.
        for _ in range(break_point):
          sess.run(get_next_op)
        with self.assertRaises(error):
          self._save(sess, saver)
  def verify_run_with_breaks(self,
                             ds_fn,
                             break_points,
                             num_outputs,
                             init_before_restore=False,
                             sparse_tensors=False,
                             verify_exhausted=True):
    """Verifies that ds_fn() produces the same outputs with and without breaks.

    1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
       *without* stopping at break points.
    2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
       with stopping at break points.

    Deep matches outputs from 1 and 2.

    Args:
      ds_fn: See `gen_outputs`.
      break_points: See `gen_outputs`.
      num_outputs: See `gen_outputs`.
      init_before_restore: See `gen_outputs`.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    # Uninterrupted run: the ground truth.
    expected = self.gen_outputs(
        ds_fn, [],
        num_outputs,
        init_before_restore=init_before_restore,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)
    # Interrupted run: save/restore at every break point.
    actual = self.gen_outputs(
        ds_fn,
        break_points,
        num_outputs,
        init_before_restore=init_before_restore,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)
    self.match(expected, actual)
  def gen_outputs(self,
                  ds_fn,
                  break_points,
                  num_outputs,
                  ckpt_saved=False,
                  init_before_restore=False,
                  sparse_tensors=False,
                  verify_exhausted=True,
                  save_checkpoint_at_end=True):
    """Generates elements from input dataset while stopping at break points.

    Produces `num_outputs` outputs and saves the state of the iterator in the
    Saver checkpoint.

    Args:
      ds_fn: 0-argument function that returns the dataset.
      break_points: A list of integers. For each `break_point` in
        `break_points`, we produce outputs till `break_point` number of items
        have been produced and then checkpoint the state. The current graph
        and session are destroyed and a new graph and session are used to
        produce outputs till next checkpoint or till `num_outputs` elements
        have been produced. `break_point` must be <= `num_outputs`.
      num_outputs: The total number of outputs to produce from the iterator.
      ckpt_saved: Whether a checkpoint already exists. If False, we build the
        graph from ds_fn.
      init_before_restore: Whether init should be called before saver.restore.
        This is just so that we can verify that restoring an already initialized
        iterator works.
      sparse_tensors: Whether dataset is built from SparseTensor(s).
      verify_exhausted: Whether to verify that the iterator has been exhausted
        after producing `num_outputs` elements.
      save_checkpoint_at_end: Whether to save a checkpoint after producing all
        outputs. If False, checkpoints are saved each break point but not at the
        end. Note that checkpoints overwrite each other so there is always only
        a single checkpoint available. Defaults to True.

    Returns:
      A list of `num_outputs` items.
    """
    outputs = []

    def get_ops():
      # Either restore ops from the checkpoint's meta graph, or build a
      # fresh graph from ds_fn, depending on whether a checkpoint exists.
      if ckpt_saved:
        saver = self._import_meta_graph()
        init_op, get_next_op = self._get_iterator_ops_from_collection(
            ds_fn, sparse_tensors=sparse_tensors)
      else:
        init_op, get_next_op, saver = self._build_graph(
            ds_fn, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      return init_op, get_next_op, saver

    # One iteration per segment: [0, bp0), [bp0, bp1), ..., [bpN, num_outputs).
    # A fresh graph and session are used for every segment.
    for i in range(len(break_points) + 1):
      with ops.Graph().as_default() as g:
        init_op, get_next_op, saver = get_ops()
        # NOTE(review): remove_variants was already applied inside get_ops();
        # this second application looks redundant but is harmless.
        get_next_op = remove_variants(get_next_op)
        with self.test_session(graph=g) as sess:
          if ckpt_saved:
            if init_before_restore:
              self._initialize(init_op, sess)
            self._restore(saver, sess)
          else:
            self._initialize(init_op, sess)
          # Produce elements in [start, end): from the previous break point
          # (or 0) up to the next break point (or num_outputs).
          start = break_points[i - 1] if i > 0 else 0
          end = break_points[i] if i < len(break_points) else num_outputs
          num_iters = end - start
          for _ in range(num_iters):
            outputs.append(sess.run(get_next_op))
          if i == len(break_points) and verify_exhausted:
            with self.assertRaises(errors.OutOfRangeError):
              sess.run(get_next_op)
          if save_checkpoint_at_end or i < len(break_points):
            self._save(sess, saver)
            ckpt_saved = True

    return outputs
def match(self, expected, actual):
"""Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
Handles scalars, numpy arrays and other python sequence containers
e.g. list, dict.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
Raises:
AssertionError if matching fails.
"""
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_sequence(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for key1, key2 in zip(sorted(expected), sorted(actual)):
self.assertEqual(key1, key2)
self.match(expected[key1], actual[key2])
else:
for item1, item2 in zip(expected, actual):
self.match(item1, item2)
else:
self.assertEqual(expected, actual)
  def does_not_match(self, expected, actual):
    """Asserts that `match(expected, actual)` fails with AssertionError."""
    with self.assertRaises(AssertionError):
      self.match(expected, actual)
def gen_break_points(self, num_outputs, num_samples=10):
"""Generates `num_samples` breaks points in [0, num_outputs]."""
return np.linspace(0, num_outputs, num_samples, dtype=int)
  def _build_graph(self, ds_fn, sparse_tensors=False):
    """Builds an initializable iterator over `ds_fn()` plus a Saver.

    Registers the iterator as a SaveableObject so its state is captured in
    checkpoints, and stores the init/get_next ops in a graph collection so
    they can be recovered after a MetaGraph import.

    Args:
      ds_fn: Zero-argument callable returning the dataset to iterate.
      sparse_tensors: Whether `get_next` components form a SparseTensor.

    Returns:
      Tuple of (init_op, get_next, saver).
    """
    iterator = ds_fn().make_initializable_iterator()
    saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    init_op = iterator.initializer
    if sparse_tensors:
      get_next = sparse_tensor.SparseTensor(*iterator.get_next())
    else:
      get_next = iterator.get_next()
    self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,
                                         sparse_tensors)
    saver = saver_lib.Saver(allow_empty=True)
    return init_op, get_next, saver
  def _build_empty_graph(self, ds_fn, sparse_tensors=False):
    """Builds an iterator from structure only (no dataset), plus a Saver.

    Unlike `_build_graph`, the iterator is created from `ds_fn`'s output
    types/shapes/classes alone, which is the graph shape used when restoring
    from a checkpoint without rebuilding the input pipeline.

    Args:
      ds_fn: Zero-argument callable returning a dataset; only its output
        structure is used here.
      sparse_tensors: Whether `get_next` components form a SparseTensor.

    Returns:
      Tuple of (get_next, saver).
    """
    iterator = iterator_ops.Iterator.from_structure(
        self._get_output_types(ds_fn),
        output_shapes=self._get_output_shapes(ds_fn),
        output_classes=self._get_output_classes(ds_fn))
    saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    if sparse_tensors:
      get_next = sparse_tensor.SparseTensor(*iterator.get_next())
    else:
      get_next = iterator.get_next()
    saver = saver_lib.Saver(allow_empty=True)
    return get_next, saver
  def _add_iterator_ops_to_collection(self,
                                      init_op,
                                      get_next,
                                      ds_fn,
                                      sparse_tensors=False):
    """Stores init/get_next ops in the "iterator_ops" graph collection."""
    ops.add_to_collection("iterator_ops", init_op)
    # `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
    # do not support tuples we flatten the tensors and restore the shape in
    # `_get_iterator_ops_from_collection`.
    # TODO(shivaniagrwal): `output_classes` is a nested structure of classes,
    # this base class is specific to current test cases. Update when tests are
    # added with `output_classes` as a nested structure with at least one of the
    # component being `tf.SparseTensor`.
    if (sparse_tensors or
        self._get_output_classes(ds_fn) is sparse_tensor.SparseTensor):
      # Store the three SparseTensor components individually, in a fixed
      # order that `_get_iterator_ops_from_collection` relies on.
      ops.add_to_collection("iterator_ops", get_next.indices)
      ops.add_to_collection("iterator_ops", get_next.values)
      ops.add_to_collection("iterator_ops", get_next.dense_shape)
    else:
      for el in nest.flatten(get_next):
        ops.add_to_collection("iterator_ops", el)
  def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):
    """Recovers (init_op, get_next) stored by `_add_iterator_ops_to_collection`."""
    all_ops = ops.get_collection("iterator_ops")
    if (sparse_tensors or
        self._get_output_classes(ds_fn) is sparse_tensor.SparseTensor):
      # Collection layout: [init_op, indices, values, dense_shape].
      init_op, indices, values, dense_shape = all_ops
      return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)
    else:
      # Restore the nesting that was flattened before storing.
      return all_ops[0], nest.pack_sequence_as(
          self._get_output_types(ds_fn), all_ops[1:])
  def _get_output_types(self, ds_fn):
    """Returns the dataset's output types, built in a throwaway graph."""
    with ops.Graph().as_default():
      return ds_fn().output_types
  def _get_output_shapes(self, ds_fn):
    """Returns the dataset's output shapes, built in a throwaway graph."""
    with ops.Graph().as_default():
      return ds_fn().output_shapes
  def _get_output_classes(self, ds_fn):
    """Returns the dataset's output classes, built in a throwaway graph."""
    with ops.Graph().as_default():
      return ds_fn().output_classes
  def _ckpt_path(self):
    """Returns the checkpoint prefix used by `_save` and `_delete_ckpt`."""
    return os.path.join(self.get_temp_dir(), "iterator")
  def _latest_ckpt(self):
    """Returns the most recent checkpoint prefix in the temp dir, if any."""
    return saver_lib.latest_checkpoint(self.get_temp_dir())
  def _save(self, sess, saver):
    """Writes a checkpoint capturing the current iterator state."""
    saver.save(sess, self._ckpt_path())
  def _restore(self, saver, sess):
    """Initializes lookup tables, then restores from the latest checkpoint."""
    sess.run(lookup_ops.tables_initializer())
    saver.restore(sess, self._latest_ckpt())
  def _initialize(self, init_op, sess):
    """Runs variable/table initializers followed by the iterator init op."""
    sess.run(variables.global_variables_initializer())
    sess.run(lookup_ops.tables_initializer())
    sess.run(init_op)
  def _import_meta_graph(self):
    """Imports the MetaGraph written next to the checkpoint; returns a Saver."""
    meta_file_path = self._ckpt_path() + ".meta"
    return saver_lib.import_meta_graph(meta_file_path)
def _delete_ckpt(self):
# Remove all checkpoint files.
prefix = self._ckpt_path()
pattern = prefix + "*"
files = gfile.Glob(pattern)
map(gfile.Remove, files)
# ==============================================================================
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
# 'ConstructionFails' has no registered kernel; giving it a trivial shape
# function lets the tests construct it and exercise error-reporting paths.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
  def testUseExistingGraph(self):
    """A session created with an explicit graph evaluates that graph's ops."""
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
    with session.Session(graph=g):
      result = c.eval()
      self.assertAllEqual(result, [[42.0]])
  def testUseDefaultGraph(self):
    """A session with no graph argument picks up the current default graph."""
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
      with session.Session():
        result = c.eval()
        self.assertAllEqual(result, [[42.0]])
  def testCreate(self):
    """Tensor.eval works both with and without a feed keyed by name."""
    with session.Session():
      inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
      copy = array_ops.identity(inp)
      # Test with feed.
      # TODO(mrry): Investigate why order='F' didn't work.
      arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
      copy_val = copy.eval({'W1:0': arr})
      self.assertAllEqual(arr, copy_val)
      # Test without feed.
      copy_val = copy.eval()
      self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
                                     dtype=np.float32), copy_val)
  def testManyCPUs(self):
    """A session can be created with multiple CPU devices configured."""
    # TODO(keveman): Implement ListDevices and test for the number of
    # devices returned by ListDevices.
    with session.Session(
        config=config_pb2.ConfigProto(device_count={'CPU': 2})):
      inp = constant_op.constant(10.0, name='W1')
      self.assertAllEqual(inp.eval(), 10.0)
  def testPerSessionThreads(self):
    """A session works with per-session thread pools enabled."""
    # TODO(keveman): Implement ListDevices and test for the number of
    # devices returned by ListDevices.
    with session.Session(
        config=config_pb2.ConfigProto(use_per_session_threads=True)):
      inp = constant_op.constant(10.0, name='W1')
      self.assertAllEqual(inp.eval(), 10.0)
  def testSessionInterOpThreadPool(self):
    """Sessions run with one or more configured inter-op thread pools."""
    config = config_pb2.ConfigProto()
    # First pool: default number of threads.
    pool = config.session_inter_op_thread_pool.add()
    with session.Session(config=config) as s:
      inp = constant_op.constant(10.0, name='W1')
      results = s.run([inp])
      self.assertAllEqual([10.0], results)
    # Second pool: restricted to a single thread.
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 1
    with session.Session(config=config) as s:
      inp = constant_op.constant(20.0, name='W2')
      results = s.run([inp])
      self.assertAllEqual([20.0], results)
  def testErrorsReported(self):
    """Fetching an unknown tensor name raises ValueError."""
    with session.Session() as s:
      constant_op.constant(10.0, name='W1')
      with self.assertRaises(ValueError):
        s.run('foo:0')
  def testErrorPayload(self):
    """An op error identifies the op that failed via its `op` attribute."""
    with session.Session():
      a = array_ops.placeholder(dtypes.float32)
      # Evaluating an unfed placeholder fails; the error's `op` is the
      # placeholder op itself.
      with self.assertRaisesOpError(lambda e: e.op == a.op):
        a.eval()
  def testErrorCodeWithNoNodeDef(self):
    """Errors raised outside op execution carry no op or node_def."""
    with session.Session() as s:
      a = array_ops.placeholder(dtypes.float32, shape=[])
      b = array_ops.placeholder(dtypes.float32, shape=[])
      r1 = math_ops.add(a, b)
      def exc_predicate(e):
        # The failure comes from the session (bogus partial_run handle),
        # not from a particular node, so op/node_def are absent.
        return (e.op is None and e.node_def is None and
                e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        # Run with a bogus handle.
        s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
  def testOpConstructionErrorPayload(self):
    """Running the kernel-less 'ConstructionFails' op reports INVALID_ARGUMENT."""
    with session.Session():
      failing_op = ops.get_default_graph().create_op(
          'ConstructionFails', [], [], name='f')
      def exc_predicate(e):
        return (e.op == failing_op
                and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        failing_op.run()
  def testErrorBasedOn(self):
    """Errors expose the chain of `_original_op`s an op was derived from."""
    with session.Session() as sess:
      a = constant_op.constant(0.0, shape=[2, 3])
      # NOTE(mrry): The original_op is nonsense, but used here to test that the
      # errors are reported correctly.
      # pylint: disable=protected-access
      with sess.graph._original_op(a.op):
        b = array_ops.identity(a, name='id')
        with sess.graph._original_op(b.op):
          c = array_ops.placeholder(dtypes.float32)
      # pylint: enable=protected-access
      def exc_predicate(e):
        # Walk the derivation chain: c was created based on b, and b on a.
        return (e.op == c.op
                and e.op._original_op == b.op
                and e.op._original_op._original_op == a.op)
      with self.assertRaisesOpError(exc_predicate):
        c.eval()
  def testFetchTensorObject(self):
    """Tensors can be fetched via run([t]), run(t), and t.eval()."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      results_with_list = s.run([c])
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
      results_with_single = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
      results_with_get = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
      a_val, b_val = s.run([a, b])  # Test multiple fetches.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
  def testFetchScalar(self):
    """Fetching a scalar returns a numpy scalar of the matching dtype."""
    with session.Session() as s:
      for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
        x = scalar(7)
        y = scalar(8)
        tf_x = constant_op.constant(x, shape=[])
        tf_y = constant_op.constant(y)
        tf_xy = math_ops.add(tf_x, tf_y)
        # Single fetch
        xy = s.run(tf_xy)
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # List fetch
        xy, = s.run([tf_xy])
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
  def testFetchOperationObject(self):
    """A variable can be fetched after running its initializer op."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      v = variables.Variable(a, name='testFetchOperationObject_v')
      s.run(v.initializer)
      v_val = s.run(v)
      self.assertAllEqual([[1.0, 1.0]], v_val)
  def testFetchSparseTensor(self):
    """SparseTensor fetches unpack as tuples or as SparseTensorValue."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = ops.SparseTensor(
          constant_op.constant(indices),
          constant_op.constant(values),
          constant_op.constant(shape))
      # Single fetch, use as tuple
      sp_out = s.run(sp)
      indices_out, values_out, shape_out = sp_out
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Single fetch, use as SparseTensorValue
      sp_out = s.run(sp)
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Tuple fetch, use as tuple
      indices_out, values_out, shape_out = s.run(sp)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as tuple
      (indices_out, values_out, shape_out), = s.run([sp])
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as SparseTensorValue
      sp_out, = s.run([sp])
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
  def testFeedSparseTensor(self):
    """SparseTensors can be fed as tuples or as SparseTensorValue."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = ops.SparseTensor(
          array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
          array_ops.placeholder(dtype=np.float32, shape=(2,)),
          array_ops.placeholder(dtype=np.int64, shape=(3,)),)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.shape)
      sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.shape, shape)
  def testFeedSparsePlaceholder(self):
    """sparse_placeholder accepts tuple and SparseTensorValue feeds."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.shape)
      sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.shape, shape)
  def testFeedSparePlaceholderConstantShape(self):
    """A sparse placeholder with a static shape has a constant dense_shape.

    Only (indices, values) need to be fed when the shape is fixed at
    construction time.  NOTE(review): "Spare" in the method name looks like
    a typo for "Sparse", but renaming would change the reported test name,
    so it is kept as-is.
    """
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(dtype=np.float32,
                                        shape=shape,
                                        name='placeholder1')
      self.assertAllEqual(sp.shape.eval(session=s), shape)
      self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
  def testFetchIndexedSlices(self):
    """IndexedSlices fetches unpack as tuples or as IndexedSlicesValue."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices),
          constant_op.constant(dense_shape))
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlices(self):
    """IndexedSlices can be fed as tuples or as IndexedSlicesValue."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(3,)),)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind_dense_shape = array_ops.identity(ind.dense_shape)
      ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
      # Feed with tuple
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: (values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testFetchIndexedSlicesWithoutDenseShape(self):
    """IndexedSlices with a None dense_shape fetch as (values, indices, None)."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = None
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices), None)
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlicesWithoutDenseShape(self):
    """IndexedSlices with a None dense_shape accept tuple and value feeds."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = None
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          None)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind2 = ops.IndexedSlices(ind_values, ind_indices)
      # Feed with tuple
      values_out, indices_out = s.run(
          [ind_values, ind_indices], {ind: (values, indices)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue
      values_out, indices_out = s.run(
          [ind_values, ind_indices],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testExtendWithStatelessOperations(self):
    """Ops added after a run() are picked up when the graph is extended."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      # Extend will happen here.
      e_val = s.run(e)
      self.assertAllEqual([[24.0]], e_val)
  def testExtendWithStatefulOperations(self):
    """Graph extension preserves variable state created before the extend."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Extend will happen here.
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      # `v` is unchanged until the assign op itself is run.
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
  def testExtendWithGroupBy(self):
    """A group() op added after an extend initializes both variables."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      p = variables.Variable(a, name='testExtendWithGroupBy_p')
      a_val = a.eval()  # Force an Extend after this op.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      b = constant_op.constant(2.0, shape=[1, 2])
      q = variables.Variable(b, name='testExtendWithGroupBy_q')
      # Extend will happen here.
      init = control_flow_ops.group(p.initializer, q.initializer)
      s.run(init)
      p_val, q_val = s.run([p, q])
      self.assertAllEqual([[1.0, 1.0]], p_val)
      self.assertAllEqual([[2.0, 2.0]], q_val)
  def testTensorGetMethod(self):
    """Tensor.eval works with and without a feed_dict keyed by tensor name."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
      self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
  def testOperationRunMethod(self):
    """Assign ops can be executed via Operation.run, including with feeds."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 2], name='b')
      # NOTE(review): `a.dtype` lands in Variable's second positional
      # parameter (`trainable`); being truthy it matches the default, but
      # confirm whether `dtype=a.dtype` was intended.
      v = variables.Variable(a, a.dtype)
      assign_a_to_v = state_ops.assign(v, a)
      assign_a_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[1.0, 1.0]], v_val)
      assign_b_to_v = state_ops.assign(v, b)
      assign_b_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[2.0, 2.0]], v_val)
      assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
      v_val = v.eval()
      self.assertAllEqual([[3.0, 3.0]], v_val)
  def testDefaultGraph(self):
    """A session adopts the default graph and keeps it across extends."""
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      self.assertEqual(ops.get_default_graph(), a.graph)
      self.assertEqual(ops.get_default_graph(), b.graph)
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testDefaultGraph_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Evaluating `e` does not run the assign; `v` keeps its old value.
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    """Worker for testDefaultGraphWithThreads; exercises one thread's graph.

    Args:
      constructed_event: Set once this thread's graph has been built.
      continue_event: Waited on so all threads finish building first.
      i: Thread index, used to give the variable a unique name.
    """
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)
      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()
      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      assign_c_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def testDefaultGraphWithThreads(self):
    """Each thread gets its own thread-local default graph."""
    # Fork ten threads that use their thread-local default graph.
    threads = []
    constructed_events = [threading.Event() for _ in range(10)]
    continue_event = threading.Event()
    for i, constructed_event in enumerate(constructed_events):
      t = self.checkedThread(target=self._testDefaultGraphInThread,
                             args=(constructed_event, continue_event, i))
      threads.append(t)
    for t in threads:
      t.start()
    # Wait until every thread has built its graph, then release them all.
    for constructed_event in constructed_events:
      constructed_event.wait()
    continue_event.set()
    for t in threads:
      t.join()
  def testParallelRun(self):
    """Many threads can call eval() on the same session concurrently."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      ev = threading.Event()
      def run_step():
        ev.wait()
        val = c.eval(session=sess)
        self.assertEqual(val, 5.0)
      threads = [self.checkedThread(target=run_step) for _ in range(100)]
      for t in threads:
        t.start()
      # Release all threads at once to maximize contention.
      ev.set()
      for t in threads:
        t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
  def testUseAfterClose(self):
    """Running on a closed session raises a RuntimeError."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
    # The `with` block above closed the session.
    with self.assertRaisesWithPredicateMatch(
        RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
      sess.run(c)
  def testUseAfterCloseConcurrent(self):
    """close() unblocks a concurrently looping run() with RuntimeError."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
      def update_thread():
        # Loop until close() makes run() raise.
        with self.assertRaisesWithPredicateMatch(
            RuntimeError,
            lambda e: 'Attempted to use a closed Session.' in str(e)):
          while True:
            sess.run(c)
      t = threading.Thread(target=update_thread)
      t.start()
      time.sleep(0.1)
      sess.close()
      t.join()
  def testUseEmptyGraph(self):
    """Running against a session with an empty graph raises RuntimeError."""
    with session.Session() as sess:
      with self.assertRaisesWithPredicateMatch(
          RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
        sess.run([])
  def testNotEntered(self):
    """run() works on a non-entered session, but eval() needs a default one."""
    # pylint: disable=protected-access
    self.assertEqual(ops._default_session_stack.get_default(), None)
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
      sess = session.Session()
      c_1 = constant_op.constant(5.0)
      with sess.graph.as_default():
        c_2 = constant_op.constant(5.0)
      self.assertEqual(c_1.graph, c_2.graph)
      self.assertEqual(sess.run(c_2), 5.0)
      # `sess` was never entered as the default session, so eval() fails.
      with self.assertRaisesWithPredicateMatch(
          ValueError, lambda e: 'No default session is registered.' in str(e)):
        c_2.eval()
  def testInteractive(self):
    """InteractiveSession installs itself as default so eval() just works."""
    with ops.device('/cpu:0'):
      sess = session.InteractiveSession()
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      self.assertAllEqual([[24.0]], e.eval())
      sess.close()
  def testInteractivePlacePrunedGraph(self):
    """InteractiveSession places only the pruned subgraph that is run."""
    sess = session.InteractiveSession()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/gpu:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    # Only run the valid op, this should work.
    b.eval()
    with self.assertRaises(errors.InvalidArgumentError):
      a.eval()
    sess.close()
  def testDefaultSessionPlacePrunedGraph(self):
    """A plain Session places the whole graph, so any bad op fails every run."""
    sess = session.Session()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/gpu:0'):
      _ = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    with self.assertRaises(errors.InvalidArgumentError):
      # Even though we don't run the bad op, we place the entire
      # graph, which should fail with a non-interactive session.
      sess.run(b)
    sess.close()
  def testSharedGraph(self):
    """Two sessions over the same graph compute identical results."""
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
    with session.Session(graph=g) as sess1:
      with session.Session(graph=g) as sess2:
        self.assertAllEqual(sess1.run(c), sess2.run(c))
  def testDuplicatedInputs(self):
    """Fetching the same tensor twice in one run() returns it twice."""
    with session.Session() as sess:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 3])
      a_val, b_val, a2_val = sess.run([a, b, a])
      self.assertAllEqual(a_val, [[1.0, 1.0]])
      self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
      self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session():
for dtype in [dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64,
dtypes.complex128]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
out_t.eval(feed_dict={feed_t: np_array}))
def testFeedError(self):
  """Feeding a tf.Tensor (rather than a value) raises TypeError everywhere."""
  with session.Session() as sess:
    feed_t = array_ops.placeholder(dtype=dtypes.float32)
    out_t = array_ops.identity(feed_t)
    feed_val = constant_op.constant(5.0)
    # Every entry point that accepts a feed_dict must reject a Tensor value.
    attempts = [
        lambda: sess.run(out_t, feed_dict={feed_t: feed_val}),
        lambda: out_t.eval(feed_dict={feed_t: feed_val}),
        lambda: out_t.op.run(feed_dict={feed_t: feed_val}),
    ]
    for attempt in attempts:
      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
        attempt()
def testFeedPrecisionLossError(self):
  """Feeding an int64 too large for an int32 tensor raises TypeError."""
  with session.Session() as sess:
    largest_int64 = np.iinfo(np.int64).max
    implicit_int32 = constant_op.constant(1)
    explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
    out_t = constant_op.constant(1.0)
    for feed_tensor in (implicit_int32, explicit_int32):
      with self.assertRaisesRegexp(TypeError,
                                   'is not compatible with Tensor type'):
        sess.run(out_t, feed_dict={feed_tensor: largest_int64})
def testStringFetch(self):
  """String constants of various shapes (including empty) fetch intact."""
  with session.Session():
    for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
      num_elems = 1
      for dim in shape:
        num_elems *= dim
      if num_elems > 0:
        c_list = np.array([compat.as_bytes(str(i)) for i in xrange(num_elems)],
                          dtype=np.object).reshape(shape)
      else:
        c_list = []
      c = constant_op.constant(c_list)
      self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
  """Object arrays of byte strings survive a feed through a placeholder."""
  with session.Session():
    for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
      num_elems = 1
      for dim in shape:
        num_elems *= dim
      strings = np.array([compat.as_bytes(str(i)) for i in xrange(num_elems)],
                         dtype=np.object).reshape(shape)
      feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
      out_t = array_ops.identity(feed_t)
      self.assertAllEqual(out_t.eval(feed_dict={feed_t: strings}), strings)
def testStringFeedWithNullCharacters(self):
  """Byte strings containing NUL bytes survive the feed round-trip."""
  with session.Session():
    inputs = [b'\n\x01\x00', b'\n\x00\x01']
    feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
    out = array_ops.identity(feed_t).eval(feed_dict={feed_t: inputs})
    for expected, actual in zip(inputs, out):
      self.assertEqual(expected, actual)
def testStringFeedWithUnicode(self):
  """Unicode feeds decode back to the original text, for list and ndarray input."""
  with session.Session():
    strings = [u'\n\x01\x00', u'\n\x00\x01']
    feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
    out_t = array_ops.identity(feed_t)
    # Feed the same values both as a plain list and as an object ndarray.
    for feed_val in (strings, np.array(strings, dtype=np.object)):
      out = out_t.eval(feed_dict={feed_t: feed_val})
      for expected, actual in zip(strings, out):
        self.assertEqual(expected, actual.decode('utf-8'))
def testInvalidTargetFails(self):
  """Constructing a Session with an unknown target raises NotFoundError."""
  expected_message = 'No session factory registered for the given session options'
  with self.assertRaisesRegexp(errors.NotFoundError, expected_message):
    session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
  """Tensors can be fetched by name given as str, unicode, or bytes."""
  with session.Session() as sess:
    c = constant_op.constant(42.0, name='c')
    d = constant_op.constant(43.0, name=u'd')
    e = constant_op.constant(44.0, name=b'e')
    f = constant_op.constant(45.0, name=r'f')
    # Regardless of the string type used for the op name, Tensor.name is text.
    for tensor in (c, d, e, f):
      self.assertTrue(isinstance(tensor.name, six.text_type))
    for expected, op_name in ((42.0, 'c'), (43.0, 'd'),
                              (44.0, 'e'), (45.0, 'f')):
      fetch_name = op_name + ':0'
      # A raw-string literal is the same object type as a plain str literal,
      # so covering str, text and bytes covers all four original spellings.
      self.assertEqual(expected, sess.run(fetch_name))
      self.assertEqual(expected, sess.run(compat.as_text(fetch_name)))
      self.assertEqual(expected, sess.run(compat.as_bytes(fetch_name)))
def testIncorrectGraph(self):
  """Running a tensor that belongs to a different graph raises ValueError."""
  with ops.Graph().as_default() as graph_a:
    const_a = constant_op.constant(1.0, name='c')
  with ops.Graph().as_default() as graph_b:
    const_b = constant_op.constant(2.0, name='c')
  # Both graphs hold an op with the same name; only the owning graph may run it.
  self.assertEqual('c', const_a.op.name)
  self.assertEqual('c', const_b.op.name)
  with session.Session(graph=graph_a) as sess_a:
    self.assertEqual(1.0, sess_a.run(const_a))
    for foreign_fetch in (const_b, const_b.op):
      with self.assertRaises(ValueError):
        sess_a.run(foreign_fetch)
  with session.Session(graph=graph_b) as sess_b:
    for foreign_fetch in (const_a, const_a.op):
      with self.assertRaises(ValueError):
        sess_b.run(foreign_fetch)
    self.assertEqual(2.0, sess_b.run(const_b))
def testPartialRun(self):
  """partial_run feeds and fetches incrementally; setup can be repeated."""
  with session.Session() as sess:
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    partial_sum = math_ops.add(a, b)
    product = math_ops.mul(partial_sum, c)
    # The second iteration re-runs setup on the same graph.
    for multiplier in (17, 18):
      handle = sess.partial_run_setup([partial_sum, product], [a, b, c])
      intermediate = sess.partial_run(handle, partial_sum,
                                      feed_dict={a: 1, b: 2})
      self.assertEqual(3, intermediate)
      final = sess.partial_run(handle, product,
                               feed_dict={c: intermediate * multiplier})
      self.assertEqual(9 * multiplier, final)
def testPartialRunIncomplete(self):
  """A partial_run handle may be abandoned without fetching every output."""
  with session.Session() as sess:
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    partial_sum = math_ops.add(a, b)
    product = math_ops.mul(partial_sum, c)
    handle = sess.partial_run_setup([partial_sum, product], [a, b, c])
    # Only the first output is fetched; `product` is deliberately left unrun.
    self.assertEqual(3, sess.partial_run(handle, partial_sum,
                                         feed_dict={a: 1, b: 2}))
def testConcurrentPartialRun(self):
  """Two partial_run handles on one session can be interleaved."""
  with session.Session() as sess:
    a = array_ops.placeholder(dtypes.float32, shape=[])
    b = array_ops.placeholder(dtypes.float32, shape=[])
    c = array_ops.placeholder(dtypes.float32, shape=[])
    partial_sum = math_ops.add(a, b)
    product = math_ops.mul(partial_sum, c)
    handle_one = sess.partial_run_setup([partial_sum], [a, b, c])
    handle_two = sess.partial_run_setup([partial_sum, product], [a, b, c])
    res = sess.partial_run(handle_one, partial_sum, feed_dict={a: 1, b: 2})
    self.assertEqual(3, res)
    res = sess.partial_run(handle_two, partial_sum,
                           feed_dict={a: res * 19, b: 9})
    self.assertEqual(66, res)
    self.assertEqual(462,
                     sess.partial_run(handle_two, product, feed_dict={c: 7}))
def testManyPartialRun(self):
  """A long chain of partial_run steps, then one full run over the chain."""
  with session.Session() as sess:
    steps = 200
    inputs = []
    outputs = []
    acc = constant_op.constant(2.0, dtypes.float32)
    for _ in xrange(steps):
      inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
      acc = math_ops.mul(acc, inputs[-1])
      outputs.append(acc)
    handle = sess.partial_run_setup(outputs, inputs)
    # Feeding 1.0 at every step keeps each intermediate product at 2.0.
    for step in xrange(steps):
      res = sess.partial_run(handle, outputs[step],
                             feed_dict={inputs[step]: 1.0})
      self.assertEqual(2.0, res)
    feed_dict = {placeholder: 1.0 for placeholder in inputs}
    res = sess.run(outputs, feed_dict)
    self.assertEqual(steps, len(res))
    self.assertEqual(2.0, res[-1])
def testRunAndPartialRun(self):
  """run() and partial_run() of the same fetches agree."""
  with session.Session() as sess:
    base = constant_op.constant(2.0, dtypes.float32)
    doubled = base * 2
    tripled = doubled * 3
    full_result = sess.run([doubled, tripled])
    handle = sess.partial_run_setup([doubled, tripled], [])
    partial_result = sess.partial_run(handle, [doubled, tripled])
    self.assertEqual(full_result, partial_result)
def testFeedDictKeyException(self):
  """A plain string is not a valid feed_dict key; a Tensor is required."""
  with session.Session() as sess:
    tensor = constant_op.constant(1.0, dtypes.float32, name='a')
    with self.assertRaisesRegexp(TypeError, "Cannot interpret feed_dict"):
      sess.run(tensor, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
  """step_stats are collected only when trace options are actually passed."""
  trace_options = config_pb2.RunOptions(
      trace_level=config_pb2.RunOptions.FULL_TRACE)
  metadata = config_pb2.RunMetadata()
  with ops.device('/cpu:0'):
    with session.Session() as sess:
      # No metadata requested: nothing is recorded.
      sess.run(constant_op.constant(1.0))
      self.assertFalse(metadata.HasField('step_stats'))
      # Metadata without trace options: still nothing recorded.
      sess.run(constant_op.constant(1.0), run_metadata=metadata)
      self.assertFalse(metadata.HasField('step_stats'))
      # Full trace requested: stats for the single device appear.
      sess.run(constant_op.constant(1.0),
               options=trace_options,
               run_metadata=metadata)
      self.assertTrue(metadata.HasField('step_stats'))
      self.assertEquals(len(metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
  """All options/run_metadata combinations are valid; stats need both."""
  trace_options = config_pb2.RunOptions(
      trace_level=config_pb2.RunOptions.FULL_TRACE)
  metadata = config_pb2.RunMetadata()
  with ops.device('/cpu:0'):
    with session.Session() as sess:
      # Neither argument: valid, nothing recorded.
      sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
      # Metadata without options: valid, nothing recorded.
      sess.run(constant_op.constant(1.0), options=None,
               run_metadata=metadata)
      self.assertFalse(metadata.HasField('step_stats'))
      # Options without metadata: valid, nothing recorded.
      sess.run(constant_op.constant(1.0), options=trace_options,
               run_metadata=None)
      self.assertFalse(metadata.HasField('step_stats'))
      # Both arguments: stats for the single device are recorded.
      sess.run(constant_op.constant(1.0), options=trace_options,
               run_metadata=metadata)
      self.assertTrue(metadata.HasField('step_stats'))
      self.assertEquals(len(metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
  """Feeds must match the tensor shape; shape-defining tensors may not be fed."""
  with session.Session() as sess:
    flat = constant_op.constant([2.0, 2.0, 2.0, 2.0])
    target_shape = constant_op.constant([2, 2])
    reshaped = array_ops.reshape(flat, target_shape)
    # Wrong number of elements for a 4-element constant.
    with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
      sess.run(reshaped, feed_dict={flat: [1.0, 2.0, 3.0]})
    # The shape input participates in shape inference and is not feedable.
    with self.assertRaisesRegexp(ValueError, 'may not be fed'):
      sess.run(reshaped, feed_dict={target_shape: [3, 7]})
def testRunWithNoTargetsIsAnError(self):
  """run([]) with nothing to fetch or execute is rejected."""
  with session.Session() as sess:
    _ = constant_op.constant(5.0)
    expected_message = 'Must specify at least one target to fetch or execute.'
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 expected_message):
      sess.run([])
def testInferShapesFalse(self):
  """By default no _output_shapes attr is stored in the GraphDef."""
  with ops.Graph().as_default(), ops.device('/cpu:0'):
    tensor = constant_op.constant([[1, 2]])
    sess = session.Session()
    self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
    # Reference `tensor` so lint does not flag it as unused.
    self.assertTrue(tensor == tensor)
def testInferShapesTrue(self):
  """With infer_shapes enabled, _output_shapes is stored in the GraphDef."""
  config = config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(infer_shapes=True))
  with ops.Graph().as_default(), ops.device('/cpu:0'):
    tensor = constant_op.constant([[1, 2]])
    sess = session.Session(config=config)
    self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
    # Reference `tensor` so lint does not flag it as unused.
    self.assertTrue(tensor == tensor)
def testBuildCostModel(self):
  """cost_graph is emitted exactly on the configured build_cost_model step."""
  run_options = config_pb2.RunOptions()
  config = config_pb2.ConfigProto(
      allow_soft_placement=True,
      graph_options=config_pb2.GraphOptions(build_cost_model=100))
  with session.Session(config=config) as sess:
    with ops.device('/gpu:0'):
      feed = array_ops.placeholder(dtypes.float32, shape=[])
      doubled = math_ops.add(feed, feed)
      passthrough = array_ops.identity(doubled)
      squared = math_ops.mul(passthrough, passthrough)
    for step in xrange(120):
      run_metadata = config_pb2.RunMetadata()
      sess.run(squared, feed_dict={feed: 1.0},
               options=run_options, run_metadata=run_metadata)
      # build_cost_model=100 fires on the 100th run, i.e. 0-indexed step 99.
      if step == 99:
        self.assertTrue(run_metadata.HasField('cost_graph'))
      else:
        self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
  """Exiting default-session scopes out of order trips the nesting check."""
  outer_sess = session.Session()
  outer_ctx = outer_sess.as_default()
  outer_ctx.__enter__()
  inner_sess = session.Session()
  inner_ctx = inner_sess.as_default()
  inner_ctx.__enter__()
  # Popping the outer context while the inner one is still active must fail.
  with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
    outer_ctx.__exit__(None, None, None)
  ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
  """InteractiveSessions may be created and discarded without strict nesting."""
  first = session.InteractiveSession()
  second = session.InteractiveSession()
  del first
  del second
# Run all test cases in this module under the test runner.
if __name__ == '__main__':
  googletest.main()
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the `physical` app.

    Replaces the single `Offering.environment` foreign key with an
    `Offering.environments` many-to-many relation backed by an explicit
    join table.
    """

    def forwards(self, orm):
        """Apply: drop the FK column and create the M2M join table."""
        # Deleting field 'Offering.environment'
        db.delete_column(u'physical_offering', 'environment_id')

        # Adding M2M table for field environments on 'Offering'
        m2m_table_name = db.shorten_name(u'physical_offering_environments')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('offering', models.ForeignKey(orm[u'physical.offering'], null=False)),
            ('environment', models.ForeignKey(orm[u'physical.environment'], null=False))
        ))
        # Each (offering, environment) pair may appear at most once.
        db.create_unique(m2m_table_name, ['offering_id', 'environment_id'])

    def backwards(self, orm):
        """Revert: restore the FK column and drop the M2M join table."""
        # Adding field 'Offering.environment'
        # NOTE(review): default=None on a non-nullable FK will fail if
        # offering rows exist when rolling back — confirm this is intended.
        db.add_column(u'physical_offering', 'environment',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name=u'offerings', to=orm['physical.Environment']),
                      keep_default=False)

        # Removing M2M table for field environments on 'Offering'
        db.delete_table(db.shorten_name(u'physical_offering_environments'))

    # Frozen ORM snapshot used by South to build the `orm` argument above.
    # Auto-generated; do not edit by hand.
    models = {
        u'physical.databaseinfra': {
            'Meta': {'object_name': 'DatabaseInfra'},
            'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
            'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'physical.databaseinfraparameter': {
            'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
            'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
            'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.diskoffering': {
            'Meta': {'object_name': 'DiskOffering'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.engine': {
            'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
            'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'physical.enginetype': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environment': {
            'Meta': {'object_name': 'Environment'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
            'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.host': {
            'Meta': {'object_name': 'Host'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
            'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
            'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'physical.instance': {
            'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
            'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'physical.offering': {
            'Meta': {'object_name': 'Offering'},
            'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.parameter': {
            'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
            'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.plan': {
            'Meta': {'object_name': 'Plan'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
            'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
            'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
        },
        u'physical.planattribute': {
            'Meta': {'object_name': 'PlanAttribute'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.replicationtopology': {
            'Meta': {'object_name': 'ReplicationTopology'},
            'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
            'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
            'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.script': {
            'Meta': {'object_name': 'Script'},
            'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.topologyparametercustomvalue': {
            'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
            'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
            'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['physical']
| |
from sympy import (Symbol, gamma, oo, nan, zoo, factorial, sqrt, Rational, log,
polygamma, EulerGamma, pi, uppergamma, S, expand_func, loggamma, sin,
cos, O, cancel, lowergamma, exp, erf, beta)
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
test_numerically as tn)
# Shared symbols used by the tests below.
x = Symbol('x')
y = Symbol('y')
# `n` carries an integer assumption for tests that rely on it.
n = Symbol('n', integer=True)
def test_gamma():
    """Special values, (half-)rational expansions and the derivative of gamma."""
    assert gamma(nan) == nan
    assert gamma(oo) == oo

    # Poles at the non-positive integers.
    assert gamma(-100) == zoo
    assert gamma(0) == zoo

    for arg, expected in ((1, 1), (2, 1), (3, 2)):
        assert gamma(arg) == expected
    assert gamma(102) == factorial(101)

    # gamma at half-integers is a rational multiple of sqrt(pi).
    half_integer_cases = (
        (Rational(1, 2), 1),
        (Rational(3, 2), Rational(1, 2)),
        (Rational(5, 2), Rational(3, 4)),
        (Rational(7, 2), Rational(15, 8)),
        (Rational(-1, 2), -2),
        (Rational(-3, 2), Rational(4, 3)),
        (Rational(-5, 2), -Rational(8, 15)),
        (Rational(-15, 2), Rational(256, 2027025)),
    )
    for arg, coeff in half_integer_cases:
        assert gamma(arg) == coeff*sqrt(pi)

    # Recurrence-based expansion reduces rational arguments to (0, 1).
    rational_cases = (
        (Rational(-11, 8), Rational(64, 33), Rational(5, 8)),
        (Rational(-10, 3), Rational(81, 280), Rational(2, 3)),
        (Rational(14, 3), Rational(880, 81), Rational(2, 3)),
        (Rational(17, 7), Rational(30, 49), Rational(3, 7)),
        (Rational(19, 8), Rational(33, 64), Rational(3, 8)),
    )
    for arg, coeff, reduced in rational_cases:
        assert gamma(arg).expand(func=True) == coeff*gamma(reduced)

    assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)

    assert gamma(x - 1).expand(func=True) == gamma(x)/(x - 1)
    assert gamma(x + 2).expand(func=True, mul=False) == x*(x + 1)*gamma(x)

    assert expand_func(gamma(x + Rational(3, 2))) == \
        (x + Rational(1, 2))*gamma(x + Rational(1, 2))
    assert expand_func(gamma(x - Rational(1, 2))) == \
        gamma(Rational(1, 2) + x)/(x - Rational(1, 2))

    # Regression check: expand_func must leave gamma(x + 3/4) untouched.
    assert expand_func(gamma(x + Rational(3, 4))) == gamma(x + Rational(3, 4))
def test_gamma_series():
    """Series expansions of gamma around x = 0 (via gamma(x + 1)) and
    around the pole at x = -1."""
    assert gamma(x + 1).series(x, 0, 3) == \
        1 - EulerGamma*x + x**2*(EulerGamma**2/2 + pi**2/12) + O(x**3)
    assert gamma(x).series(x, -1, 3) == \
        -1/x + EulerGamma - 1 + x*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma) \
        + x**2*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma**3/6 - \
        polygamma(2, 1)/6 + EulerGamma*pi**2/12 + EulerGamma) + O(x**3)
def tn_branch(s, func):
    """Numerically verify the branch-cut behaviour of ``func(s, z)`` along the
    negative real axis: the jump computed with polar arguments must agree with
    the jump computed as a limit from just above/below the axis."""
    from sympy import I, pi, exp_polar
    from random import uniform
    radius = uniform(1, 5)
    eps = 1e-15
    polar_jump = func(s, radius*exp_polar(I*pi)) - func(s, radius*exp_polar(-I*pi))
    limit_jump = func(s + eps, -radius + eps*I) - func(s + eps, -radius - eps*I)
    return abs(polar_jump.n() - limit_jump.n()).n() < 1e-10
def test_lowergamma():
    """Derivatives, special values, rewrites and branching of lowergamma."""
    from sympy import meijerg, exp_polar, I, expint
    # d/dy and d/dx (the latter involves a Meijer G-function).
    assert lowergamma(x, y).diff(y) == y**(x-1)*exp(-y)
    assert td(lowergamma(randcplx(), y), y)
    assert lowergamma(x, y).diff(x) == \
        gamma(x)*polygamma(0, x) - uppergamma(x, y)*log(y) \
        + meijerg([], [1, 1], [0, 0, x], [], y)
    # Half-integer first arguments evaluate in terms of erf.
    assert lowergamma(S.Half, x) == sqrt(pi)*erf(sqrt(x))
    assert not lowergamma(S.Half - 3, x).has(lowergamma)
    assert not lowergamma(S.Half + 3, x).has(lowergamma)
    assert lowergamma(S.Half, x, evaluate=False).has(lowergamma)
    # Unevaluated and evaluated forms must agree numerically.
    assert tn(lowergamma(S.Half + 3, x, evaluate=False),
              lowergamma(S.Half + 3, x), x)
    assert tn(lowergamma(S.Half - 3, x, evaluate=False),
              lowergamma(S.Half - 3, x), x)
    assert lowergamma(x, y).rewrite(uppergamma) == gamma(x) - uppergamma(x, y)
    # Branch-cut behaviour (numerical, see tn_branch above).
    assert tn_branch(-3, lowergamma)
    assert tn_branch(-4, lowergamma)
    assert tn_branch(S(1)/3, lowergamma)
    assert tn_branch(pi, lowergamma)
    # Monodromy: extra polar windings pick up exponential factors.
    assert lowergamma(3, exp_polar(4*pi*I)*x) == lowergamma(3, x)
    assert lowergamma(y, exp_polar(5*pi*I)*x) == \
        exp(4*I*pi*y)*lowergamma(y, x*exp_polar(pi*I))
    assert lowergamma(-2, exp_polar(5*pi*I)*x) == \
        lowergamma(-2, x*exp_polar(I*pi)) + 2*pi*I
    # Rewrite in terms of the exponential integral.
    assert lowergamma(x, y).rewrite(expint) == -y**x*expint(-x + 1, y) + gamma(x)
    k = Symbol('k', integer=True)
    assert lowergamma(k, y).rewrite(expint) == -y**k*expint(-k + 1, y) + gamma(k)
    # The expint rewrite is invalid for non-positive integer k.
    k = Symbol('k', integer=True, positive=False)
    assert lowergamma(k, y).rewrite(expint) == lowergamma(k, y)
def test_uppergamma():
    """Derivatives, special values, rewrites and branching of uppergamma."""
    from sympy import meijerg, exp_polar, I, expint
    assert uppergamma(4, 0) == 6
    # d/dy and d/dx (the latter involves a Meijer G-function).
    assert uppergamma(x, y).diff(y) == -y**(x-1)*exp(-y)
    assert td(uppergamma(randcplx(), y), y)
    assert uppergamma(x, y).diff(x) == \
        uppergamma(x, y)*log(y) + meijerg([], [1, 1], [0, 0, x], [], y)
    assert td(uppergamma(x, randcplx()), x)
    # Half-integer first arguments evaluate in terms of erf.
    assert uppergamma(S.Half, x) == sqrt(pi)*(1 - erf(sqrt(x)))
    assert not uppergamma(S.Half - 3, x).has(uppergamma)
    assert not uppergamma(S.Half + 3, x).has(uppergamma)
    assert uppergamma(S.Half, x, evaluate=False).has(uppergamma)
    # Unevaluated and evaluated forms must agree numerically.
    assert tn(uppergamma(S.Half + 3, x, evaluate=False),
              uppergamma(S.Half + 3, x), x)
    assert tn(uppergamma(S.Half - 3, x, evaluate=False),
              uppergamma(S.Half - 3, x), x)
    assert uppergamma(x, y).rewrite(lowergamma) == gamma(x) - lowergamma(x, y)
    # Branch-cut behaviour (numerical, see tn_branch above).
    assert tn_branch(-3, uppergamma)
    assert tn_branch(-4, uppergamma)
    assert tn_branch(S(1)/3, uppergamma)
    assert tn_branch(pi, uppergamma)
    # Monodromy: extra polar windings pick up exponential factors.
    assert uppergamma(3, exp_polar(4*pi*I)*x) == uppergamma(3, x)
    assert uppergamma(y, exp_polar(5*pi*I)*x) == \
        exp(4*I*pi*y)*uppergamma(y, x*exp_polar(pi*I)) + gamma(y)*(1-exp(4*pi*I*y))
    assert uppergamma(-2, exp_polar(5*pi*I)*x) == \
        uppergamma(-2, x*exp_polar(I*pi)) - 2*pi*I
    # Rewrite in terms of the exponential integral.
    assert uppergamma(-2, x) == expint(3, x)/x**2
    assert uppergamma(x, y).rewrite(expint) == y**x*expint(-x + 1, y)
def test_polygamma():
    """Special values, closed forms at rational points, differentiation and
    branching of polygamma.

    Fix: the assertion ``polygamma(0, -9) == zoo`` appeared twice verbatim;
    the redundant duplicate has been removed.
    """
    from sympy import I
    assert polygamma(n, nan) == nan
    # Behaviour at infinity.
    assert polygamma(0, oo) == oo
    assert polygamma(1, oo) == 0
    assert polygamma(5, oo) == 0
    # Poles at the non-positive integers.
    assert polygamma(0, -9) == zoo
    assert polygamma(0, -1) == zoo
    assert polygamma(0, 0) == zoo
    # Classical special values.
    assert polygamma(0, 1) == -EulerGamma
    assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
    assert polygamma(1, 1) == pi**2/6
    assert polygamma(1, 2) == pi**2/6 - 1
    assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
    assert polygamma(3, 1) == pi**4 / 15
    assert polygamma(3, 5) == 6*(Rational(-22369,20736) + pi**4/90)
    assert polygamma(5, 1) == 8 * pi**6 / 63

    def t(m, n):
        # polygamma(0, m/n) must evaluate to an explicit closed form
        # that agrees numerically with the unevaluated function.
        x = S(m)/n
        r = polygamma(0, x)
        if r.has(polygamma):
            return False
        return abs(polygamma(0, x.n()).n() - r.n()).n() < 1e-10
    assert t(1, 2)
    assert t(3, 2)
    assert t(-1, 2)
    assert t(1, 4)
    assert t(-3, 4)
    assert t(1, 3)
    assert t(4, 3)
    assert t(3, 4)
    assert t(2, 3)
    # Chain rule.
    assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
    # Polygamma of non-negative integer order is unbranched:
    from sympy import exp_polar
    k = Symbol('n', integer=True, nonnegative=True)
    assert polygamma(k, exp_polar(2*I*pi)*x) == polygamma(k, x)
    # but negative integers are branched!
    k = Symbol('n', integer=True)
    assert polygamma(k, exp_polar(2*I*pi)*x).args == (k, exp_polar(2*I*pi)*x)
    # Polygamma of order -1 is loggamma:
    assert polygamma(-1, x) == loggamma(x)
    # But smaller orders are iterated integrals and don't have a special name
    assert polygamma(-2, x).func is polygamma
    # Test a bug
    assert polygamma(0, -x).expand(func=True) == polygamma(0, -x)
def test_polygamma_expand_func():
    """expand(func=True) on polygamma: duplication formula, argument shifts
    by integers, and cases that must be left untouched."""
    assert polygamma(0, x).expand(func=True) == polygamma(0, x)
    # Duplication formula for psi(2x) (and its derivative).
    assert polygamma(0, 2*x).expand(func=True) == \
        polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
    assert polygamma(1, 2*x).expand(func=True) == \
        polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
    assert polygamma(2, x).expand(func=True) == \
        polygamma(2, x)
    # Integer shifts of the argument unfold via the recurrence relation.
    assert polygamma(0, -1 + x).expand(func=True) == \
        polygamma(0, x) - 1/(x - 1)
    assert polygamma(0, 1 + x).expand(func=True) == \
        1/x + polygamma(0, x )
    assert polygamma(0, 2 + x).expand(func=True) == \
        1/x + 1/(1 + x) + polygamma(0, x)
    assert polygamma(0, 3 + x).expand(func=True) == \
        polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
    assert polygamma(0, 4 + x).expand(func=True) == \
        polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
    assert polygamma(1, 1 + x).expand(func=True) == \
        polygamma(1, x) - 1/x**2
    assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
        polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
    assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
        polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
    assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
        polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
        1/(2 + x)**2 - 1/(3 + x)**2
    # Without an explicit integer shift there is nothing to unfold.
    assert polygamma(0, x + y).expand(func=True) == \
        polygamma(0, x + y)
    assert polygamma(1, x + y).expand(func=True) == \
        polygamma(1, x + y)
    # Shifts are still unfolded when the base argument is a compound sum.
    assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
        polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
        1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
    assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
        polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
        6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
    assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
        polygamma(3, y + 4*x) - 6/(y + 4*x)**4
    # Non-integer shifts must not be unfolded.
    e = polygamma(3, 4*x + y + S(3)/2)
    assert e.expand(func=True) == e
    e = polygamma(3, x + y + S(3)/4)
    assert e.expand(func = True, basic = False) == e
def test_loggamma():
    """Series expansions of loggamma, including the Stirling expansion of
    loggamma(1/x) and the order bookkeeping of _eval_nseries."""
    s1 = loggamma(1/(x+sin(x))+cos(x)).nseries(x,n=4)
    s2 = (-log(2*x)-1)/(2*x) - log(x/pi)/2 + (4-log(2*x))*x/24 + O(x**2)
    assert (s1 - s2).expand(force=True).removeO() == 0
    # Stirling-type asymptotic expansion of loggamma(1/x) for small x.
    s1 = loggamma(1/x).series(x)
    s2 = (1/x-S(1)/2)*log(1/x) - 1/x + log(2*pi)/2 + \
        x/12 - x**3/360 + x**5/1260 + O(x**7)
    assert ((s1 - s2).expand(force=True)).removeO() == 0

    def tN(N, M):
        # Requesting n=N terms must produce an O-term of order M.
        assert loggamma(1/x)._eval_nseries(x,n=N,logx=None).getn() == M
    tN(0, 0)
    tN(1, 1)
    tN(2, 3)
    tN(3, 3)
    tN(4, 5)
    tN(5, 5)
def test_polygamma_expansion():
    """Asymptotic expansions of polygamma(m, 1/x) for small x."""
    # Abramowitz & Stegun, pp. 259 and 260
    assert polygamma(0, 1/x).nseries(x, n=3) \
        == -log(x) - x/2 - x**2/12 + O(x**4)
    assert polygamma(1, 1/x).series(x, n=5) \
        == x + x**2/2 + x**3/6 + O(x**5)
    assert polygamma(3, 1/x).nseries(x, n=8) \
        == 2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
def test_beta_function():
    """Definition of beta in terms of gamma, and its symmetry."""
    x, y = Symbol('x'), Symbol('y')
    assert beta(x,y) == gamma(x)*gamma(y)/gamma(x+y)
    # Beta is symmetric in its two arguments.
    assert beta(x,y) == beta(y,x)
| |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division
import argparse
import random
import os
import json
from .file_helpers import read_contents
from .common import version
import sys
import re
import gettext
import locale
from time import time
from io import open
from .mocodo_error import MocodoError
# Help text shown by `mocodo --help` (argparse description).
# Fix: typos in user-facing text — "explicitely" -> "explicitly",
# "dependant of" -> "dependent on".
DESCRIPTION = """
NAME:
  Mocodo - An Entity-Relation Diagram Generator.
DESCRIPTION:
  Mocodo is an open-source tool for designing and teaching relational databases.
  It takes as an input a textual description of both entities and associations
  of an entity-relationship diagram (ERD). It outputs a vectorial drawing in SVG
  and a relational schema in various formats (SQL, LaTeX, Markdown, etc.).
NOTE:
  Each one of the following values is:
  - explicitly specified by the user as a command line option;
  - otherwise, retrieved from a file located at --params_path;
  - otherwise, retrieved from a file named 'params.json' in the input directory;
  - otherwise, calculated from a default value, possibly dependent on your system.
"""
EPILOG = u"""
SEE ALSO:
Online version http://mocodo.net
Source code https://github.com/laowantong/mocodo
Localization https://www.transifex.com/aristide/mocodo/
LICENSE:
GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007.
CONTACT:
Mail <software name>@wingi.net
Author Aristide Grange
Address Universite de Lorraine
Laboratoire LCOMS - UFR MIM
Ile du Saulcy
57000 Metz
France
""" # NB: accents raise an error in Jupyter Notebook
class ArgumentDefaultsRawDescriptionHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
    # Combine two argparse formatters: append default values to option help
    # AND keep the pre-formatted layout of DESCRIPTION/EPILOG.
    pass
def init_localization(script_directory, language):
    """Install gettext translations and return the language code actually used.

    If *language* is falsy, detect it: on macOS, read the system language
    preferences; otherwise fall back to the default locale, and finally to
    English. A missing .mo catalog silently falls back to NullTranslations,
    so `_` is always installed into builtins.

    Fixes: the regex is now a raw string (avoids an invalid-escape warning),
    and the bare `except:` is narrowed to the exceptions that
    `locale.getdefaultlocale()[0][:2]` can actually raise.
    """
    if not language:
        if sys.platform.lower().startswith("darwin") and os.system("defaults read -g AppleLanguages > /tmp/languages.txt") == 0:
            language = re.search(r"\W*(\w+)", read_contents("/tmp/languages.txt")).group(1)
        else:
            try:
                language = locale.getdefaultlocale()[0][:2]
            except (TypeError, IndexError):
                # getdefaultlocale() may return (None, None): [0][:2] then raises.
                language = "en"
    try:
        with open("%s/res/messages_%s.mo" % (script_directory, language), "rb") as mo_contents:
            trans = gettext.GNUTranslations(mo_contents)
    except IOError:
        # No catalog for this language: fall back to identity translations.
        trans = gettext.NullTranslations()
    if sys.version_info.major == 2:
        trans.install(unicode=True)
    else:
        trans.install()
    return language
def has_expired(timeout):
    """Return a zero-argument predicate telling whether *timeout* seconds
    have elapsed since this call. A falsy timeout yields a predicate that
    never expires."""
    if not timeout:
        def never_expires():
            return False
        return never_expires
    deadline = timeout + time()
    def past_deadline():
        return time() > deadline
    return past_deadline
def rate(string):
    """argparse type function: a float in the closed interval [0, 1]."""
    try:
        value = float(string)
    except ValueError:
        raise argparse.ArgumentTypeError("The rate %r cannot be coerced to float" % string)
    if 0 <= value <= 1:
        return value
    raise argparse.ArgumentTypeError("The rate %r is not between 0 and 1" % string)
def scale(string):
    """argparse type function: a strictly positive float."""
    try:
        value = float(string)
    except ValueError:
        raise argparse.ArgumentTypeError("The scale %r cannot be coerced to float" % string)
    if value > 0:
        return value
    raise argparse.ArgumentTypeError("The scale %r is not strictly positive" % string)
def non_negative_integer(string):
    """argparse type function: an integer >= 0."""
    try:
        value = int(string)
    except ValueError:
        raise argparse.ArgumentTypeError("The value %r cannot be coerced to an integer" % string)
    if value >= 0:
        return value
    raise argparse.ArgumentTypeError("The integer %r is negative" % string)
def positive_integer(string):
    """argparse type function: an integer >= 1."""
    try:
        value = int(string)
    except ValueError:
        raise argparse.ArgumentTypeError("The value %r cannot be coerced to an integer" % string)
    if value > 0:
        return value
    raise argparse.ArgumentTypeError("The integer %r is negative or zero" % string)
def parsed_arguments():
    """Parse the command line and the optional params.json file, and return
    the merged parameter dict.

    Parsing happens in two phases: a first parse_known_args() extracts
    --language, --params_path and --input (needed to localize messages and
    load defaults), then the remaining options are parsed against the
    defaults computed from the platform and params.json.
    """
    def add_key(key, value):
        # Register a computed parameter and remember it was derived,
        # not user-supplied (so it can be excluded when re-saving params).
        params[key] = value
        params["added_keys"].append(key)
    script_directory = os.path.dirname(os.path.realpath(os.path.join(__file__)))
    parser = argparse.ArgumentParser(
        prog="mocodo",
        add_help=False,
        formatter_class=ArgumentDefaultsRawDescriptionHelpFormatter,
        description=DESCRIPTION,
        epilog=EPILOG
    )
    # Option groups, in the order they appear in --help.
    mocodo_group = parser.add_argument_group("OPTIONS ON MOCODO ITSELF")
    io_group = parser.add_argument_group("INPUT/OUTPUT")
    aspect_group = parser.add_argument_group("ASPECT OF THE GRAPHICAL OUTPUT")
    relational_group = parser.add_argument_group("RELATIONAL OUTPUT")
    source_group = parser.add_argument_group("MODIFICATIONS OF THE SOURCE TEXT")
    bb_group = parser.add_argument_group("BRANCH & BOUND LAYOUT REARRANGEMENT", "sub-options triggered by the option --arrange=bb")
    ga_group = parser.add_argument_group("GENETIC ALGORITHM LAYOUT REARRANGEMENT", "sub-options triggered by the option --arrange=ga")
    lp_group = parser.add_argument_group("LINEAR PROGRAMMING LAYOUT REARRANGEMENT", "sub-options triggered by the option --arrange=lp")
    nb_group = parser.add_argument_group("NOTEBOOK SPECIFIC OPTIONS", "ignored when called from the command line")
    # Platform-dependent defaults (candidate encodings, image format, fonts).
    if sys.platform.lower().startswith("darwin"):
        default_params = {
            "encodings": ["utf8", "macroman"],
            "image_format": "nodebox" if os.path.exists("/Applications/NodeBox/NodeBox.app") or os.path.exists("/Applications/NodeBox.app") else "svg",
            "shapes": "copperplate",
        }
    elif sys.platform.lower().startswith("win"):
        default_params = {
            "encodings": ["utf8", "ISO_8859-15"],
            "image_format": "svg",
            "shapes": "trebuchet",
        }
    else: # linux
        default_params = {
            "encodings": ["utf8", "ISO_8859-15"],
            "image_format": "svg",
            "shapes": "serif",
        }
    # Phase 1: only the options needed to bootstrap localization and defaults.
    mocodo_group.add_argument("--language", metavar="CODE", type=str, help="override the automatic localization of the messages with the given language code (e.g., 'fr', 'en', ...)")
    io_group.add_argument("--params_path", metavar="PATH", default="params.json", help="the path of the parameter file. If omitted, use 'params.json' in the input directory. If non existent, use default parameters.")
    io_group.add_argument("--input", metavar="PATH", help="the path of the input file. By default, the output files will be generated in the same directory")
    (args, remaining_args) = parser.parse_known_args()
    text_type = (unicode if sys.version_info.major == 2 else str)  # Python 2/3 string type
    if args.input and not os.path.exists(args.input):
        # Tolerate a missing ".mcd" extension before giving up.
        if os.path.exists(args.input + ".mcd"):
            args.input += ".mcd"
        else: # the user has explicitely specified a non existent input file
            init_localization(script_directory, default_params.get("language", args.language))
            raise MocodoError(18, _('The file "{input}" doesn\'t exist.').format(input=args.input))
    default_params["input"] = args.input
    # params.json values override the platform defaults.
    if os.path.exists(args.params_path):
        default_params.update(json.loads(read_contents(args.params_path)))
    if not default_params["input"]:
        default_params["input"] = "sandbox.mcd"
    # Install `_` for the translated help strings declared below.
    default_params["language"] = init_localization(script_directory, default_params.get("language", args.language))
    default_params.setdefault("output_dir", os.path.dirname(default_params["input"]))
    # Phase 2: the full option set.
    mocodo_group.add_argument("--help", action="help", help="show this help message and exit")
    mocodo_group.add_argument("--version", action="version", version="%(prog)s " + version, help="display the version number, then exit")
    mocodo_group.add_argument("--restore", action="store_true", help="recreate a pristine version of the files 'sandbox.mcd' and 'params.json' in the input directory, then exit")
    aspect_group.add_argument("--df", metavar="STR", type=text_type, default=u"DF", help="the acronym to be circled in a functional dependency")
    aspect_group.add_argument("--card_format", metavar="STR", type=text_type, nargs="?", default=u"{min_card},{max_card}", help="format string for minimal and maximal cardinalities")
    aspect_group.add_argument("--strengthen_card", metavar="STR", type=text_type, nargs="?", default=u"_1,1_", help="string for relative cardinalities")
    source_group.add_argument("--flex", metavar="FLOAT", type=float, default=0.75, help="flex straight legs whose cardinalities may collide")
    aspect_group.add_argument("--tkinter", action="store_true", help="use Tkinter to calculate the pixel-dimensions of the labels")
    aspect_group.add_argument("--colors", metavar="PATH", default="bw", help="the color palette to use when generating the drawing. Name (without extension) of a file located in the directory 'colors', or path to a personal file")
    aspect_group.add_argument("--shapes", metavar="PATH", help="specification of the fonts, dimensions, etc. Name (without extension) of a file located in the directory 'shapes', or path to a personal file")
    aspect_group.add_argument("--scale", metavar="RATE", type=scale, default=1, help="scale the diagram by the given factor")
    aspect_group.add_argument("--hide_annotations", action="store_true", help="ignore the hovering of annotated elements")
    relational_group.add_argument("--relations", metavar="NAME", nargs="*", default=["html", "text"], help="one or several templates for the generated relational schemas. Cf. directory 'relation_templates'")
    relational_group.add_argument("--disambiguation", choices=["numbers_only", "annotations"], default="annotations", help="specify the way to disambiguate foreign attributes")
    relational_group.add_argument("--title", metavar="STR", default=_(u'Untitled').encode("utf8"), type=str, help="database name (used for SQL output)")
    relational_group.add_argument("--guess_title", action="store_true", help="use the name of the most referred entity as title")
    io_group.add_argument("--output_dir", metavar="PATH", help="the directory of the output files")
    io_group.add_argument("--encodings", metavar="STR", nargs="*", help="one or several encodings to be tried successively when reading the input file")
    io_group.add_argument("--extract", action="store_true", help="create a separated JSON file for the geometric parameters")
    io_group.add_argument("--image_format", choices=["svg", "nodebox"], help="override the automatic selection (depending on your installation) of the image format produced by the generated script")
    io_group.add_argument("--print_params", action="store_true", help="display the contents of the parameter file, then exit")
    source_group.add_argument("--arrange", nargs="?", const="bb", choices=["bb", "ga", "lp"], help="rearrange the layout with either a Branch & Bound, a Genetic Algorithm, or a Linear Program solver, then exit")
    source_group.add_argument("--timeout", metavar="SECONDS", type=int, help="limit the duration of the layout rearrangement")
    source_group.add_argument("--verbose", action="store_true", help="display some gory details during the layout rearrangement")
    source_group.add_argument("--fit", metavar="INT", type=int, const=0, nargs="?", help="fit the layout in the nth smallest grid")
    source_group.add_argument("--flip", choices=["h", "v", "d"], help="display an horizontal / vertical / diagonal flip of the input file, then exit")
    source_group.add_argument("--obfuscate", metavar="PATH", type=os.path.abspath, nargs="?", const="lorem_ipsum.txt", help="display an obfuscated version of the input file, then exit. Cf. directory 'lorem'")
    source_group.add_argument("--obfuscation_max_length", metavar="NAT*", type=positive_integer, help="maximal length of obfuscated labels")
    source_group.add_argument("--obfuscation_min_distance", metavar="NAT*", type=positive_integer, default=3, help="minimal Damerau-Levenshtein's distance between any two obfuscated labels")
    source_group.add_argument("--seed", metavar="FLOAT", type=float, help="initial value for the random number generator")
    bb_group.add_argument("--call_limit", metavar="NAT*", type=positive_integer, default=10000, help="maximal number of calls for a given starting box")
    bb_group.add_argument("--min_objective", metavar="NAT*", type=positive_integer, default=0, help="best acceptable fitness for a layout")
    bb_group.add_argument("--max_objective", metavar="NAT*", type=positive_integer, default=15, help="worst acceptable fitness for a layout")
    bb_group.add_argument("--organic", action="store_true", help="unconstrained Branch & Bound")
    ga_group.add_argument("--population_size", metavar="NAT*", type=positive_integer, default=1000, help="number of individuals to evolve")
    ga_group.add_argument("--crossover_rate", metavar="RATE", type=rate, default=0.9, help="crossover rate, between 0 and 1")
    ga_group.add_argument("--mutation_rate", metavar="RATE", type=rate, default=0.06, help="mutation rate, between 0 and 1")
    ga_group.add_argument("--sample_size", metavar="NAT*", type=positive_integer, default=7, help="the sample size in tournaments")
    ga_group.add_argument("--max_generations", metavar="NAT*", type=positive_integer, default=300, help="maximal number of generations")
    ga_group.add_argument("--plateau", metavar="NAT*", type=positive_integer, default=30, help="maximal number of consecutive generations without improvement")
    lp_group.add_argument("--engine", nargs="?", const="cplex", choices=["cplex", "gurobi"], help="solver for the linear program")
    nb_group.add_argument("--mld", action="store_true", help="display the HTML relational model in the cell output")
    nb_group.add_argument("--no_mcd", action="store_true", help="do not display the conceptual diagram in the cell output")
    nb_group.add_argument("--replace", action="store_true", help="replaces the cell contents by its output")
    parser.set_defaults(**default_params)
    params = vars(parser.parse_args(remaining_args))
    # Derived parameters, tracked via add_key.
    params["added_keys"] = ["added_keys", "params_path"]
    add_key("script_directory", script_directory)
    add_key("has_expired", has_expired(params["timeout"]))
    add_key("output_name", os.path.join(params["output_dir"], os.path.splitext(os.path.basename(params["input"]))[0]))
    # import pprint
    # pprint.pprint(params)
    # No input file at all: seed the directory with the pristine sandbox.
    if not os.path.exists(params["input"]):
        import shutil
        shutil.copyfile(os.path.join(params["script_directory"], "pristine_sandbox.mcd"), params["input"])
    random.seed(params["seed"])
    # params["title"] = params["title"].decode("utf8")
    return params
| |
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import copy
import json
import operator
import re
import six
import urllib
from jinja2 import Environment, StrictUndefined
from jinja2.exceptions import UndefinedError, TemplateSyntaxError
# Django
from django.core import exceptions as django_exceptions
from django.db.models.signals import (
post_save,
post_delete,
)
from django.db.models.signals import m2m_changed
from django.db import models
from django.db.models.fields.related import add_lazy_relation
from django.db.models.fields.related_descriptors import (
ReverseOneToOneDescriptor,
ForwardManyToOneDescriptor,
ManyToManyDescriptor,
ReverseManyToOneDescriptor,
)
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
# jsonschema
from jsonschema import Draft4Validator, FormatChecker
import jsonschema.exceptions
# Django-JSONField
from jsonfield import JSONField as upstream_JSONField
from jsonbfield.fields import JSONField as upstream_JSONBField
# DRF
from rest_framework import serializers
# AWX
from awx.main.utils.filters import SmartFilter
from awx.main.utils.encryption import encrypt_value, decrypt_value, get_encryption_key
from awx.main.validators import validate_ssh_private_key
from awx.main.models.rbac import batch_role_ancestor_rebuilding, Role
from awx.main.constants import CHOICES_PRIVILEGE_ESCALATION_METHODS, ENV_BLACKLIST
from awx.main import utils
# Public names exported by this module.
__all__ = ['AutoOneToOneField', 'ImplicitRoleField', 'JSONField',
           'SmartFilterField', 'update_role_parentage_for_instance',
           'is_implicit_parent']
# Provide a (better) custom error message for enum jsonschema validation
def __enum_validate__(validator, enums, instance, schema):
    """Replacement for jsonschema's 'enum' validator that yields a
    friendlier, translatable error message."""
    if instance in enums:
        return
    yield jsonschema.exceptions.ValidationError(
        _("'{value}' is not one of ['{allowed_values}']").format(
            value=instance, allowed_values="', '".join(enums))
    )
Draft4Validator.VALIDATORS['enum'] = __enum_validate__
class JSONField(upstream_JSONField):
    """JSON data stored in a plain 'text' column (wraps django-jsonfield)."""

    def db_type(self, connection):
        # Force a TEXT column regardless of the database backend.
        return 'text'

    def from_db_value(self, value, expression, connection, context):
        # Normalize NULL / empty-string rows to {} for non-nullable fields.
        if value in {'', None} and not self.null:
            return {}
        return super(JSONField, self).from_db_value(value, expression, connection, context)
class JSONBField(upstream_JSONBField):
    """JSONB field with a sqlite fallback and fixes for lookups and loading."""

    def get_prep_lookup(self, lookup_type, value):
        # Allow querying for the JSON literal null via the string "null".
        if isinstance(value, six.string_types) and value == "null":
            return 'null'
        return super(JSONBField, self).get_prep_lookup(lookup_type, value)

    def get_db_prep_value(self, value, connection, prepared=False):
        if connection.vendor == 'sqlite':
            # sqlite (which we use for tests) does not support jsonb;
            # store a serialized JSON string instead.
            return json.dumps(value)
        return super(JSONBField, self).get_db_prep_value(
            value, connection, prepared
        )

    def from_db_value(self, value, expression, connection, context):
        # Work around a bug in django-jsonfield
        # https://bitbucket.org/schinckel/django-jsonfield/issues/57/cannot-use-in-the-same-project-as-djangos
        if isinstance(value, six.string_types):
            return json.loads(value)
        return value
# Based on AutoOneToOneField from django-annoying:
# https://bitbucket.org/offline/django-annoying/src/a0de8b294db3/annoying/fields.py
class AutoSingleRelatedObjectDescriptor(ReverseOneToOneDescriptor):
    """Descriptor for access to the object from its related class.

    Unlike the stock reverse one-to-one descriptor, a missing related object
    is created on first access instead of raising DoesNotExist.
    """

    def __get__(self, instance, instance_type=None):
        try:
            return super(AutoSingleRelatedObjectDescriptor,
                         self).__get__(instance, instance_type)
        except self.related.related_model.DoesNotExist:
            # Auto-create the missing related object, linked back to *instance*.
            obj = self.related.related_model(**{self.related.field.name: instance})
            if self.related.field.rel.parent_link:
                raise NotImplementedError('not supported with polymorphic!')
            # Copy the local field values of *instance* onto the new object.
            for f in instance._meta.local_fields:
                setattr(obj, f.name, getattr(instance, f.name))
            obj.save()
            return obj
class AutoOneToOneField(models.OneToOneField):
    """OneToOneField that creates related object if it doesn't exist."""

    def contribute_to_related_class(self, cls, related):
        # Install the auto-creating descriptor on the related class.
        setattr(cls, related.get_accessor_name(),
                AutoSingleRelatedObjectDescriptor(related))
def resolve_role_field(obj, field):
    """Follow the dotted *field* path from *obj* and return the list of ids
    of the Role objects it terminates at. Many-to-many hops fan out over
    every related object; a broken or None link yields an empty list."""
    head, dot, rest = field.partition('.')
    if not hasattr(obj, head):
        return []
    obj = getattr(obj, head)
    if obj is None:
        return []
    if not dot:
        # End of the path: the attribute must be a Role.
        role_cls = str(utils.get_current_apps().get_model('main', 'Role'))
        if not str(type(obj)) == role_cls:
            raise Exception(smart_text('{} refers to a {}, not a Role'.format(field, type(obj))))
        return [obj.id]
    collected = []
    if type(obj) is ManyToManyDescriptor:
        # Fan out across every related object for the remaining path.
        for related in obj.all():
            collected += resolve_role_field(related, rest)
    else:
        collected += resolve_role_field(obj, rest)
    return collected
def is_implicit_parent(parent_role, child_role):
    '''
    Determine if the parent_role is an implicit parent as defined by
    the model definition. This does not include any role parents that
    might have been set by the user.
    '''
    # Get the list of implicit parents that were defined at the class level.
    implicit_parents = getattr(
        child_role.content_object.__class__, child_role.role_field
    ).field.parent_role
    # parent_role may be declared as a single path or a list of paths.
    if type(implicit_parents) != list:
        implicit_parents = [implicit_parents]
    # Check to see if the role matches any in the implicit parents list
    for implicit_parent_path in implicit_parents:
        if implicit_parent_path.startswith('singleton:'):
            # Singleton role isn't an object role, `singleton_name` uniquely identifies it
            if parent_role.is_singleton() and parent_role.singleton_name == implicit_parent_path[10:]:
                return True
        else:
            # Walk over multiple related objects to obtain the implicit parent
            related_obj = child_role.content_object
            for next_field in implicit_parent_path.split('.'):
                related_obj = getattr(related_obj, next_field)
                if related_obj is None:
                    # Broken link along the path: this candidate cannot match.
                    break
            if related_obj and parent_role == related_obj:
                return True
    return False
def update_role_parentage_for_instance(instance):
    """Recompute the implicit parents of every role attached to *instance*,
    and persist the role only when its parent set actually changed."""
    for role_field in getattr(instance.__class__, '__implicit_role_fields'):
        role = getattr(instance, role_field.name)
        previous = set(json.loads(role.implicit_parents))
        current = role_field._resolve_parent_roles(instance)
        # Apply the delta to the m2m relation.
        role.parents.remove(*list(previous - current))
        role.parents.add(*list(current - previous))
        # Store the canonical (sorted) JSON form for cheap future comparison.
        serialized = json.dumps(sorted(current))
        if role.implicit_parents != serialized:
            role.implicit_parents = serialized
            role.save()
class ImplicitRoleDescriptor(ForwardManyToOneDescriptor):
    # Plain ForwardManyToOneDescriptor; subclassed only so accesses through
    # an ImplicitRoleField can be distinguished from ordinary foreign keys.
    pass
class ImplicitRoleField(models.ForeignKey):
"""Implicitly creates a role entry for a resource"""
def __init__(self, parent_role=None, *args, **kwargs):
self.parent_role = parent_role
kwargs.setdefault('to', 'Role')
kwargs.setdefault('related_name', '+')
kwargs.setdefault('null', 'True')
kwargs.setdefault('editable', False)
super(ImplicitRoleField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(ImplicitRoleField, self).deconstruct()
kwargs['parent_role'] = self.parent_role
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
super(ImplicitRoleField, self).contribute_to_class(cls, name)
setattr(cls, self.name, ImplicitRoleDescriptor(self))
if not hasattr(cls, '__implicit_role_fields'):
setattr(cls, '__implicit_role_fields', [])
getattr(cls, '__implicit_role_fields').append(self)
post_save.connect(self._post_save, cls, True, dispatch_uid='implicit-role-post-save')
post_delete.connect(self._post_delete, cls, True, dispatch_uid='implicit-role-post-delete')
add_lazy_relation(cls, self, "self", self.bind_m2m_changed)
def bind_m2m_changed(self, _self, _role_class, cls):
if not self.parent_role:
return
field_names = self.parent_role
if type(field_names) is not list:
field_names = [field_names]
for field_name in field_names:
# Handle the OR syntax for role parents
if type(field_name) == tuple:
continue
if field_name.startswith('singleton:'):
continue
field_name, sep, field_attr = field_name.partition('.')
field = getattr(cls, field_name)
if type(field) is ReverseManyToOneDescriptor or \
type(field) is ManyToManyDescriptor:
if '.' in field_attr:
raise Exception('Referencing deep roles through ManyToMany fields is unsupported.')
if type(field) is ReverseManyToOneDescriptor:
sender = field.through
else:
sender = field.related.through
reverse = type(field) is ManyToManyDescriptor
m2m_changed.connect(self.m2m_update(field_attr, reverse), sender, weak=False)
def m2m_update(self, field_attr, _reverse):
def _m2m_update(instance, action, model, pk_set, reverse, **kwargs):
if action == 'post_add' or action == 'pre_remove':
if _reverse:
reverse = not reverse
if reverse:
for pk in pk_set:
obj = model.objects.get(pk=pk)
if action == 'post_add':
getattr(instance, field_attr).children.add(getattr(obj, self.name))
if action == 'pre_remove':
getattr(instance, field_attr).children.remove(getattr(obj, self.name))
else:
for pk in pk_set:
obj = model.objects.get(pk=pk)
if action == 'post_add':
getattr(instance, self.name).parents.add(getattr(obj, field_attr))
if action == 'pre_remove':
getattr(instance, self.name).parents.remove(getattr(obj, field_attr))
return _m2m_update
def _post_save(self, instance, created, *args, **kwargs):
Role_ = utils.get_current_apps().get_model('main', 'Role')
ContentType_ = utils.get_current_apps().get_model('contenttypes', 'ContentType')
ct_id = ContentType_.objects.get_for_model(instance).id
Model = utils.get_current_apps().get_model('main', instance.__class__.__name__)
latest_instance = Model.objects.get(pk=instance.pk)
with batch_role_ancestor_rebuilding():
# Create any missing role objects
missing_roles = []
for implicit_role_field in getattr(latest_instance.__class__, '__implicit_role_fields'):
cur_role = getattr(latest_instance, implicit_role_field.name, None)
if cur_role is None:
missing_roles.append(
Role_(
role_field=implicit_role_field.name,
content_type_id=ct_id,
object_id=latest_instance.id
)
)
if len(missing_roles) > 0:
Role_.objects.bulk_create(missing_roles)
updates = {}
role_ids = []
for role in Role_.objects.filter(content_type_id=ct_id, object_id=latest_instance.id):
setattr(latest_instance, role.role_field, role)
updates[role.role_field] = role.id
role_ids.append(role.id)
type(latest_instance).objects.filter(pk=latest_instance.pk).update(**updates)
Role.rebuild_role_ancestor_list(role_ids, [])
update_role_parentage_for_instance(latest_instance)
instance.refresh_from_db()
def _resolve_parent_roles(self, instance):
    """
    Return the set of parent Role ids declared by this field's
    `parent_role` specification for *instance*.

    Entries prefixed with "singleton:" resolve (creating on demand) a
    global singleton Role; any other entry is resolved as a field path
    relative to *instance*.
    """
    if not self.parent_role:
        return set()

    # Normalize to a list of role paths.
    if type(self.parent_role) is list:
        paths = self.parent_role
    else:
        paths = [self.parent_role]

    parent_roles = set()
    for path in paths:
        if path.startswith("singleton:"):
            singleton_name = path[len("singleton:"):]
            Role_ = utils.get_current_apps().get_model('main', 'Role')
            qs = Role_.objects.filter(singleton_name=singleton_name)
            if qs.count() >= 1:
                role = qs[0]
            else:
                # Lazily create the singleton the first time it is named.
                role = Role_.objects.create(singleton_name=singleton_name,
                                            role_field=singleton_name)
            parent_roles.add(role.id)
        else:
            parent_roles.update(resolve_role_field(instance, path))
    return parent_roles
def _post_delete(self, instance, *args, **kwargs):
    """
    post_delete handler: remove the Role rows backing this instance's
    implicit role fields, then rebuild ancestry for orphaned children.
    """
    role_ids = [
        getattr(instance, implicit_role_field.name + '_id')
        for implicit_role_field in getattr(instance.__class__, '__implicit_role_fields')
    ]
    Role_ = utils.get_current_apps().get_model('main', 'Role')
    # Capture the children before the parents disappear.
    child_qs = Role_.parents.through.objects.filter(to_role_id__in=role_ids)
    child_ids = [x for x in child_qs.distinct().values_list('from_role_id', flat=True)]
    Role_.objects.filter(id__in=role_ids).delete()
    Role.rebuild_role_ancestor_list([], child_ids)
class SmartFilterField(models.TextField):
    """TextField whose value must parse as a valid smart-filter query."""

    def get_prep_value(self, value):
        # Any false value (None, '', ...) is normalized to NULL.
        # https://docs.python.org/2/library/stdtypes.html#truth-value-testing
        if not value:
            return None
        unquoted = urllib.unquote(value)
        try:
            # Parse only to validate; the raw string is what gets stored.
            SmartFilter().query_from_string(unquoted)
        except RuntimeError as e:
            raise models.base.ValidationError(e)
        return super(SmartFilterField, self).get_prep_value(unquoted)
class JSONSchemaField(JSONBField):
    """
    A JSONB field that self-validates against a defined JSON schema
    (http://json-schema.org). This base class is intended to be overwritten by
    defining `self.schema`.
    """
    format_checker = FormatChecker()

    # If an empty {} is provided, we still want to perform this schema
    # validation
    empty_values = (None, '')

    def get_default(self):
        # Deep-copy so callers cannot mutate a shared default dict/list.
        # NOTE(review): this deliberately calls super(JSONBField, ...),
        # skipping JSONBField's own get_default — confirm that is intended.
        return copy.deepcopy(super(JSONBField, self).get_default())

    def schema(self, model_instance):
        # Subclasses must return the JSON schema (a dict) to validate against.
        raise NotImplementedError()

    def validate(self, value, model_instance):
        super(JSONSchemaField, self).validate(value, model_instance)
        errors = []
        for error in Draft4Validator(
            self.schema(model_instance),
            format_checker=self.format_checker
        ).iter_errors(value):
            # strip Python unicode markers from jsonschema validation errors
            error.message = re.sub(r'\bu(\'|")', r'\1', error.message)

            if error.validator == 'pattern' and 'error' in error.schema:
                # Schema authors may supply a friendlier 'error' template.
                error.message = six.text_type(error.schema['error']).format(instance=error.instance)
            elif error.validator == 'type':
                # Report Python type names (dict, not "object") to the user.
                expected_type = error.validator_value
                if expected_type == 'object':
                    expected_type = 'dict'
                if error.path:
                    error.message = _(
                        '{type} provided in relative path {path}, expected {expected_type}'
                    ).format(path=list(error.path), type=type(error.instance).__name__,
                             expected_type=expected_type)
                else:
                    error.message = _(
                        '{type} provided, expected {expected_type}'
                    ).format(path=list(error.path), type=type(error.instance).__name__,
                             expected_type=expected_type)
            elif error.validator == 'additionalProperties' and hasattr(error, 'path'):
                error.message = _(
                    'Schema validation error in relative path {path} ({error})'
                ).format(path=list(error.path), error=error.message)
            errors.append(error)

        if errors:
            raise django_exceptions.ValidationError(
                [e.message for e in errors],
                code='invalid',
                params={'value': value},
            )

    def get_db_prep_value(self, value, connection, prepared=False):
        if connection.vendor == 'sqlite':
            # sqlite (which we use for tests) does not support jsonb;
            return json.dumps(value)
        return super(JSONSchemaField, self).get_db_prep_value(
            value, connection, prepared
        )

    def from_db_value(self, value, expression, connection, context):
        # Work around a bug in django-jsonfield
        # https://bitbucket.org/schinckel/django-jsonfield/issues/57/cannot-use-in-the-same-project-as-djangos
        if isinstance(value, six.string_types):
            return json.loads(value)
        return value
@JSONSchemaField.format_checker.checks('vault_id')
def format_vault_id(value):
    """JSON-schema format checker: vault ids must not contain '@'."""
    if '@' not in value:
        return True
    raise jsonschema.exceptions.FormatError('@ is not an allowed character')
@JSONSchemaField.format_checker.checks('ssh_private_key')
def format_ssh_private_key(value):
    """JSON-schema format checker: validate SSH private key material."""
    # Sanity check: GCE, in particular, provides JSON-encoded private
    # keys, which developers will be tempted to copy and paste rather
    # than JSON decode.
    #
    # These end in a unicode-encoded final character that gets double
    # escaped due to being in a Python 2 bytestring, and that causes
    # Python's key parsing to barf. Detect this issue and correct it.
    if not value or value == '$encrypted$':
        # Empty values and the encrypted placeholder pass through unchanged.
        return True
    if r'\u003d' in value:
        value = value.replace(r'\u003d', '=')
    try:
        validate_ssh_private_key(value)
    except django_exceptions.ValidationError as e:
        # Re-raise as the exception type jsonschema's checker expects.
        raise jsonschema.exceptions.FormatError(e.message)
    return True
class CredentialInputField(JSONSchemaField):
    """
    Used to validate JSON for
    `awx.main.models.credential:Credential().inputs`.

    Input data for credentials is represented as a dictionary e.g.,
    {'api_token': 'abc123', 'api_secret': 'SECRET'}

    For the data to be valid, the keys of this dictionary should correspond
    with the field names (and datatypes) defined in the associated
    CredentialType e.g.,

    {
        'fields': [{
            'id': 'api_token',
            'label': 'API Token',
            'type': 'string'
        }, {
            'id': 'api_secret',
            'label': 'API Secret',
            'type': 'string'
        }]
    }
    """

    def schema(self, model_instance):
        # determine the defined fields for the associated credential type
        properties = {}
        for field in model_instance.credential_type.inputs.get('fields', []):
            # Copy so the mutations below don't leak back into the
            # CredentialType's own `inputs` definition.
            field = field.copy()
            if field['type'] == 'become_method':
                field.pop('type')
                field['choices'] = map(operator.itemgetter(0), CHOICES_PRIVILEGE_ESCALATION_METHODS)
            properties[field['id']] = field
            if field.get('choices', []):
                # NOTE(review): `[:]` assumes choices is a list; under
                # Python 3 the `map` above is not sliceable — verify.
                field['enum'] = field['choices'][:]
        return {
            'type': 'object',
            'properties': properties,
            'dependencies': model_instance.credential_type.inputs.get('dependencies', {}),
            'additionalProperties': False,
        }

    def validate(self, value, model_instance):
        # decrypt secret values so we can validate their contents (i.e.,
        # ssh_key_data format)

        if not isinstance(value, dict):
            return super(CredentialInputField, self).validate(value,
                                                              model_instance)

        # Backwards compatability: in prior versions, if you submit `null` for
        # a credential field value, it just considers the value an empty string
        for unset in [key for key, v in model_instance.inputs.items() if not v]:
            default_value = model_instance.credential_type.default_for_field(unset)
            if default_value is not None:
                model_instance.inputs[unset] = default_value

        decrypted_values = {}
        for k, v in value.items():
            if all([
                k in model_instance.credential_type.secret_fields,
                v != '$encrypted$',
                model_instance.pk
            ]):
                # NOTE(review): the guard inspects getattr(model_instance, k)
                # but the error message reports type(v) — confirm these are
                # expected to be the same value here.
                if not isinstance(getattr(model_instance, k), six.string_types):
                    raise django_exceptions.ValidationError(
                        _('secret values must be of type string, not {}').format(type(v).__name__),
                        code='invalid',
                        params={'value': v},
                    )
                decrypted_values[k] = utils.decrypt_field(model_instance, k)
            else:
                decrypted_values[k] = v

        # Skip JSONSchemaField.validate (grandparent call) so schema errors
        # can be collected per-field below instead of raised in bulk.
        super(JSONSchemaField, self).validate(decrypted_values, model_instance)
        errors = {}
        for error in Draft4Validator(
            self.schema(model_instance),
            format_checker=self.format_checker
        ).iter_errors(decrypted_values):
            if error.validator == 'pattern' and 'error' in error.schema:
                error.message = six.text_type(error.schema['error']).format(instance=error.instance)
            if error.validator == 'dependencies':
                # replace the default error messaging w/ a better i18n string
                # I wish there was a better way to determine the parameters of
                # this validation failure, but the exception jsonschema raises
                # doesn't include them as attributes (just a hard-coded error
                # string)
                match = re.search(
                    # 'foo' is a dependency of 'bar'
                    r"'"  # apostrophe
                    r"([^']+)"  # one or more non-apostrophes (first group)
                    r"'[\w ]+'"  # one or more words/spaces
                    r"([^']+)",  # second group
                    error.message,
                )
                if match:
                    label, extraneous = match.groups()
                    if error.schema['properties'].get(label):
                        label = error.schema['properties'][label]['label']
                    errors[extraneous] = [
                        _('cannot be set unless "%s" is set') % label
                    ]
                    continue
            if 'id' not in error.schema:
                # If the error is not for a specific field, it's specific to
                # `inputs` in general
                raise django_exceptions.ValidationError(
                    error.message,
                    code='invalid',
                    params={'value': value},
                )
            errors[error.schema['id']] = [error.message]

        inputs = model_instance.credential_type.inputs
        for field in inputs.get('required', []):
            if not value.get(field, None):
                errors[field] = [_('required for %s') % (
                    model_instance.credential_type.name
                )]

        # `ssh_key_unlock` requirements are very specific and can't be
        # represented without complicated JSON schema
        if (
            model_instance.credential_type.managed_by_tower is True and
            'ssh_key_unlock' in model_instance.credential_type.defined_fields
        ):

            # in order to properly test the necessity of `ssh_key_unlock`, we
            # need to know the real value of `ssh_key_data`; for a payload like:
            # {
            #     'ssh_key_data': '$encrypted$',
            #     'ssh_key_unlock': 'do-you-need-me?',
            # }
            # ...we have to fetch the actual key value from the database
            if model_instance.pk and model_instance.ssh_key_data == '$encrypted$':
                model_instance.ssh_key_data = model_instance.__class__.objects.get(
                    pk=model_instance.pk
                ).ssh_key_data

            if model_instance.has_encrypted_ssh_key_data and not value.get('ssh_key_unlock'):
                errors['ssh_key_unlock'] = [_('must be set when SSH key is encrypted.')]
            if all([
                model_instance.ssh_key_data,
                value.get('ssh_key_unlock'),
                not model_instance.has_encrypted_ssh_key_data
            ]):
                errors['ssh_key_unlock'] = [_('should not be set when SSH key is not encrypted.')]

        if errors:
            raise serializers.ValidationError({
                'inputs': errors
            })
class CredentialTypeInputField(JSONSchemaField):
    """
    Used to validate JSON for
    `awx.main.models.credential:CredentialType().inputs`.
    """

    def schema(self, model_instance):
        # Static schema for an `inputs` document: an optional list of
        # required ids plus a list of field definitions.
        return {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'required': {
                    'type': 'array',
                    'items': {'type': 'string'}
                },
                'fields': {
                    'type': 'array',
                    'items': {
                        'type': 'object',
                        'properties': {
                            'type': {'enum': ['string', 'boolean', 'become_method']},
                            'format': {'enum': ['ssh_private_key']},
                            'choices': {
                                'type': 'array',
                                'minItems': 1,
                                'items': {'type': 'string'},
                                'uniqueItems': True
                            },
                            'id': {
                                'type': 'string',
                                'pattern': '^[a-zA-Z_]+[a-zA-Z0-9_]*$',
                                'error': '{instance} is an invalid variable name',
                            },
                            'label': {'type': 'string'},
                            'help_text': {'type': 'string'},
                            'multiline': {'type': 'boolean'},
                            'secret': {'type': 'boolean'},
                            'ask_at_runtime': {'type': 'boolean'},
                        },
                        'additionalProperties': False,
                        'required': ['id', 'label'],
                    }
                }
            }
        }

    def validate(self, value, model_instance):
        # 'dependencies' is only honored for Tower-managed credential types.
        if isinstance(value, dict) and 'dependencies' in value and \
                not model_instance.managed_by_tower:
            raise django_exceptions.ValidationError(
                _("'dependencies' is not supported for custom credentials."),
                code='invalid',
                params={'value': value},
            )

        # Run the JSON-schema validation (see schema() above) first.
        super(CredentialTypeInputField, self).validate(
            value, model_instance
        )

        ids = {}
        for field in value.get('fields', []):
            id_ = field.get('id')
            if id_ == 'tower':
                raise django_exceptions.ValidationError(
                    _('"tower" is a reserved field name'),
                    code='invalid',
                    params={'value': value},
                )

            if id_ in ids:
                raise django_exceptions.ValidationError(
                    _('field IDs must be unique (%s)' % id_),
                    code='invalid',
                    params={'value': value},
                )
            ids[id_] = True

            if 'type' not in field:
                # If no type is specified, default to string
                field['type'] = 'string'

            if field['type'] == 'become_method':
                if not model_instance.managed_by_tower:
                    raise django_exceptions.ValidationError(
                        _('become_method is a reserved type name'),
                        code='invalid',
                        params={'value': value},
                    )
                else:
                    field.pop('type')
                    field['choices'] = CHOICES_PRIVILEGE_ESCALATION_METHODS

            # Presentation/value keys only make sense for string fields.
            # NOTE(review): if 'type' was popped above for a managed
            # become_method field, field['type'] here would raise KeyError
            # when one of these keys is present — verify against how
            # managed types define their fields.
            for key in ('choices', 'multiline', 'format', 'secret',):
                if key in field and field['type'] != 'string':
                    raise django_exceptions.ValidationError(
                        _('{sub_key} not allowed for {element_type} type ({element_id})'.format(
                            sub_key=key, element_type=field['type'], element_id=field['id'])),
                        code='invalid',
                        params={'value': value},
                    )
class CredentialTypeInjectorField(JSONSchemaField):
    """
    Used to validate JSON for
    `awx.main.models.credential:CredentialType().injectors`.
    """

    def schema(self, model_instance):
        # Injectors may render files, export environment variables, or pass
        # extra_vars; each namespace constrains the allowed key names.
        return {
            'type': 'object',
            'additionalProperties': False,
            'properties': {
                'file': {
                    'type': 'object',
                    'patternProperties': {
                        r'^template(\.[a-zA-Z_]+[a-zA-Z0-9_]*)?$': {'type': 'string'},
                    },
                    'additionalProperties': False,
                },
                'env': {
                    'type': 'object',
                    'patternProperties': {
                        # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap08.html
                        # In the shell command language, a word consisting solely
                        # of underscores, digits, and alphabetics from the portable
                        # character set. The first character of a name is not
                        # a digit.
                        '^[a-zA-Z_]+[a-zA-Z0-9_]*$': {
                            'type': 'string',
                            # The environment variable _value_ can be any ascii,
                            # but pexpect will choke on any unicode
                            'pattern': '^[\x00-\x7F]*$'
                        },
                    },
                    'additionalProperties': False,
                },
                'extra_vars': {
                    'type': 'object',
                    'patternProperties': {
                        # http://docs.ansible.com/ansible/playbooks_variables.html#what-makes-a-valid-variable-name
                        '^[a-zA-Z_]+[a-zA-Z0-9_]*$': {'type': 'string'},
                    },
                    'additionalProperties': False,
                },
            },
            'additionalProperties': False
        }

    def validate_env_var_allowed(self, env_var):
        # Reject variables that could alter Ansible's own behavior.
        if env_var.startswith('ANSIBLE_'):
            raise django_exceptions.ValidationError(
                _('Environment variable {} may affect Ansible configuration so its '
                  'use is not allowed in credentials.').format(env_var),
                code='invalid', params={'value': env_var},
            )
        if env_var in ENV_BLACKLIST:
            raise django_exceptions.ValidationError(
                _('Environment variable {} is blacklisted from use in credentials.').format(env_var),
                code='invalid', params={'value': env_var},
            )

    def validate(self, value, model_instance):
        super(CredentialTypeInjectorField, self).validate(
            value, model_instance
        )

        # make sure the inputs are valid first
        try:
            CredentialTypeInputField().validate(model_instance.inputs, model_instance)
        except django_exceptions.ValidationError:
            # If `model_instance.inputs` itself is invalid, we can't make an
            # estimation as to whether our Jinja templates contain valid field
            # names; don't continue
            return

        # In addition to basic schema validation, search the injector fields
        # for template variables and make sure they match the fields defined in
        # the inputs
        valid_namespace = dict(
            (field, 'EXAMPLE')
            for field in model_instance.defined_fields
        )

        # Sentinels that raise a helpful error when a template references
        # `tower` (or `tower.filename`) in a context where it is invalid.
        class ExplodingNamespace:
            def __unicode__(self):
                raise UndefinedError(_('Must define unnamed file injector in order to reference `tower.filename`.'))

        class TowerNamespace:
            def __init__(self):
                self.filename = ExplodingNamespace()

            def __unicode__(self):
                raise UndefinedError(_('Cannot directly reference reserved `tower` namespace container.'))

        valid_namespace['tower'] = TowerNamespace()

        # ensure either single file or multi-file syntax is used (but not both)
        template_names = [x for x in value.get('file', {}).keys() if x.startswith('template')]
        if 'template' in template_names:
            valid_namespace['tower'].filename = 'EXAMPLE_FILENAME'
            if len(template_names) > 1:
                raise django_exceptions.ValidationError(
                    _('Must use multi-file syntax when injecting multiple files'),
                    code='invalid',
                    params={'value': value},
                )
        elif template_names:
            # Multi-file syntax: expose each template.<name> as an attribute
            # of the (exploding) filename sentinel.
            for template_name in template_names:
                template_name = template_name.split('.')[1]
                setattr(valid_namespace['tower'].filename, template_name, 'EXAMPLE_FILENAME')

        for type_, injector in value.items():
            if type_ == 'env':
                for key in injector.keys():
                    self.validate_env_var_allowed(key)
            for key, tmpl in injector.items():
                try:
                    # Render each template against the example namespace to
                    # catch undefined field references and syntax errors.
                    Environment(
                        undefined=StrictUndefined
                    ).from_string(tmpl).render(valid_namespace)
                except UndefinedError as e:
                    raise django_exceptions.ValidationError(
                        _('{sub_key} uses an undefined field ({error_msg})').format(
                            sub_key=key, error_msg=e),
                        code='invalid',
                        params={'value': value},
                    )
                except TemplateSyntaxError as e:
                    raise django_exceptions.ValidationError(
                        _('Syntax error rendering template for {sub_key} inside of {type} ({error_msg})').format(
                            sub_key=key, type=type_, error_msg=e),
                        code='invalid',
                        params={'value': value},
                    )
class AskForField(models.BooleanField):
    """
    Denotes whether to prompt on launch for another field on the same template
    """

    def __init__(self, allows_field=None, **kwargs):
        super(AskForField, self).__init__(**kwargs)
        self._allows_field = allows_field

    @property
    def allows_field(self):
        """Name of the companion field this flag allows prompting for."""
        if self._allows_field is not None:
            return self._allows_field
        try:
            # Derive the target from an 'ask_<field>_on_launch' field name.
            return self.name[len('ask_'):-len('_on_launch')]
        except AttributeError:
            # self.name will be set by the model metaclass, not this field
            raise Exception('Corresponding allows_field cannot be accessed until model is initialized.')
class OAuth2ClientSecretField(models.CharField):
    """CharField stored encrypted at rest and decrypted on read."""

    def get_db_prep_value(self, value, connection, prepared=False):
        # Encrypt on the way into the database.
        return super(OAuth2ClientSecretField, self).get_db_prep_value(
            encrypt_value(value), connection, prepared
        )

    def from_db_value(self, value, expression, connection, context):
        # Only values carrying the '$encrypted$' marker are decrypted;
        # anything else is returned untouched.
        if value and value.startswith('$encrypted$'):
            return decrypt_value(get_encryption_key('value', pk=None), value)
        return value
| |
# -*- coding: utf-8 -*-
# pep8-ignore: E501, E241
"""
IP subnet calculator.
.. moduleauthor:: Wijnand Modderman-Lenstra <maze@pyth0n.org>
.. note:: BSD License
About
=====
This module allows you to perform network calculations.
References
==========
References:
* http://www.estoile.com/links/ipv6.pdf
* http://www.iana.org/assignments/ipv4-address-space
* http://www.iana.org/assignments/multicast-addresses
* http://www.iana.org/assignments/ipv6-address-space
* http://www.iana.org/assignments/ipv6-tla-assignments
* http://www.iana.org/assignments/ipv6-multicast-addresses
* http://www.iana.org/assignments/ipv6-anycast-addresses
Thanks
======
Thanks to all who have contributed:
https://github.com/tehmaze/ipcalc/graphs/contributors
"""
from __future__ import print_function
__version__ = '1.99.0'
import re
import six
MAX_IPV6 = (1 << 128) - 1
MAX_IPV4 = (1 << 32) - 1
BASE_6TO4 = (0x2002 << 112)
class IP(object):
    """
    Represent a single IP address.

    :param ip: the ip address
    :type ip: :class:`IP` or str or long or int

    >>> localhost = IP("127.0.0.1")
    >>> print(localhost)
    127.0.0.1
    >>> localhost6 = IP("::1")
    >>> print(localhost6)
    0000:0000:0000:0000:0000:0000:0000:0001
    """

    # Hex-to-Bin conversion masks
    _bitmask = {
        '0': '0000', '1': '0001', '2': '0010', '3': '0011',
        '4': '0100', '5': '0101', '6': '0110', '7': '0111',
        '8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
        'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'
    }

    # IP range specific information, see IANA allocations.
    _range = {
        # http://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml
        4: {
            '00000000': 'THIS HOST',  # 0/8
            '00001010': 'PRIVATE',  # 10/8
            '0110010001': 'SHARED ADDRESS SPACE',  # 100.64/10
            '01111111': 'LOOPBACK',  # 127/8
            '101011000001': 'PRIVATE',  # 172.16/12
            '110000000000000000000000': 'IETF PROTOCOL',  # 192/24
            '110000000000000000000010': 'TEST-NET-1',  # 192.0.2/24
            '110000000101100001100011': '6TO4-RELAY ANYCAST',  # 192.88.99/24
            '1100000010101000': 'PRIVATE',  # 192.168/16
            '110001100001001': 'BENCHMARKING',  # 198.18/15
            '110001100011001': 'TEST-NET-2',  # 198.51.100/24
            '110010110000000': 'TEST-NET-3',  # 203.0.113/24
            '1111': 'RESERVED',  # 240/4
        },
        # http://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
        6: {
            '0' * 128: 'UNSPECIFIED',  # ::/128
            '0' * 127 + '1': 'LOOPBACK',  # ::1/128
            '0' * 96: 'IPV4COMP',  # ::/96
            '0' * 80 + '1' * 16: 'IPV4MAP',  # ::ffff:0:0/96
            # 64:ff9b::/96
            '00000000011001001111111110011011' + 64 * '0': 'IPV4-IPV6',
            '00000001' + 56 * '0': 'DISCARD-ONLY',  # 100::/64
            '0010000000000001' + 7 * '0': 'IETF PROTOCOL',  # 2001::/23
            '0010000000000001' + 16 * '0': 'TEREDO',  # 2001::/32
            # 2001:2::/48
            '00100000000000010000000000000010000000000000000': 'BENCHMARKING',
            '00100000000000010000110110111000': 'DOCUMENTATION',  # 2001:db8::/32
            '0010000000000001000000000001': 'DEPRECATED',  # 2001:10::/28
            '0010000000000001000000000010': 'ORCHIDv2',  # 2001:20::/28
            '0010000000000010': '6TO4',  # 2002::/16
            '11111100000000000': 'UNIQUE-LOCAL',  # fc00::/7
            '1111111010': 'LINK-LOCAL',  # fe80::/10
        }
    }

    def __init__(self, ip, mask=None, version=0):
        """Initialize a new IPv4 or IPv6 address."""
        self.mask = mask
        self.v = 0
        # Parse input
        if ip is None:
            raise ValueError('Can not pass None')
        elif isinstance(ip, IP):
            self.ip = ip.ip
            self.dq = ip.dq
            self.v = ip.v
            self.mask = ip.mask
        elif isinstance(ip, six.integer_types):
            self.ip = int(ip)
            if self.ip <= MAX_IPV4:
                self.v = version or 4
                self.dq = self._itodq(ip)
            else:
                self.v = version or 6
                self.dq = self._itodq(ip)
        else:
            # If string is in CIDR or netmask notation
            if '/' in ip:
                ip, mask = ip.split('/', 1)
                self.mask = mask
            self.v = version or 0
            self.dq = ip
            self.ip = self._dqtoi(ip)
            assert self.v != 0, 'Could not parse input'

        # Netmask defaults to one ip
        if self.mask is None:
            self.mask = {4: 32, 6: 128}[self.v]
        # Netmask is numeric CIDR subnet
        elif isinstance(self.mask, six.integer_types) or self.mask.isdigit():
            self.mask = int(self.mask)
        # Netmask is in subnet notation
        elif isinstance(self.mask, six.string_types):
            limit = [32, 128][':' in self.mask]
            inverted = ~self._dqtoi(self.mask)
            if inverted == -1:
                self.mask = 0
            else:
                # Count trailing host bits of the (inverted) mask.
                count = 0
                while inverted & pow(2, count):
                    count += 1
                self.mask = (limit - count)
        else:
            raise ValueError('Invalid netmask')

        # Validate subnet size
        if self.v == 6:
            self.dq = self._itodq(self.ip)
            if not 0 <= self.mask <= 128:
                raise ValueError('IPv6 subnet size must be between 0 and 128')
        elif self.v == 4:
            if not 0 <= self.mask <= 32:
                raise ValueError('IPv4 subnet size must be between 0 and 32')

    def bin(self):
        """Full-length binary representation of the IP address.

        >>> ip = IP("127.0.0.1")
        >>> print(ip.bin())
        01111111000000000000000000000001
        """
        bits = self.v == 4 and 32 or 128
        return bin(self.ip).split('b')[1].rjust(bits, '0')

    def hex(self):
        """Full-length hexadecimal representation of the IP address.

        >>> ip = IP("127.0.0.1")
        >>> print(ip.hex())
        7f000001
        """
        if self.v == 4:
            return '%08x' % self.ip
        else:
            return '%032x' % self.ip

    def subnet(self):
        """CIDR subnet size."""
        return self.mask

    def version(self):
        """IP version.

        >>> ip = IP("127.0.0.1")
        >>> print(ip.version())
        4
        """
        return self.v

    def info(self):
        """Show IANA allocation information for the current IP address.

        >>> ip = IP("127.0.0.1")
        >>> print(ip.info())
        LOOPBACK
        """
        b = self.bin()
        # Match the longest known prefix first.
        for i in range(len(b), 0, -1):
            if b[:i] in self._range[self.v]:
                return self._range[self.v][b[:i]]
        return 'UNKNOWN'

    def _dqtoi(self, dq):
        """Convert dotquad or hextet to long."""
        # hex notation
        if dq.startswith('0x'):
            # BUG FIX: previously `self._dqtoi_hex(self, dq)` — passed `self`
            # twice and discarded the result, so '0x...' input always fell
            # through to the ValueError below.
            return self._dqtoi_hex(dq)

        # IPv6
        if ':' in dq:
            return self._dqtoi_ipv6(dq)
        elif len(dq) == 32:
            # Assume full heximal notation
            self.v = 6
            return int(dq, 16)

        # IPv4
        if '.' in dq:
            return self._dqtoi_ipv4(dq)

        raise ValueError('Invalid address input')

    def _dqtoi_hex(self, dq):
        ip = int(dq[2:], 16)
        if ip > MAX_IPV6:
            raise ValueError('%s: IP address is bigger than 2^128' % dq)
        if ip <= MAX_IPV4:
            self.v = 4
        else:
            self.v = 6
        return ip

    def _dqtoi_ipv4(self, dq):
        q = dq.split('.')
        q.reverse()
        if len(q) > 4:
            raise ValueError('%s: IPv4 address invalid: '
                             'more than 4 bytes' % dq)
        for x in q:
            if not 0 <= int(x) <= 255:
                raise ValueError('%s: IPv4 address invalid: '
                                 'bytes should be between 0 and 255' % dq)
        # Short notation (e.g. '127.1') pads zero octets in the middle.
        while len(q) < 4:
            q.insert(1, '0')
        self.v = 4
        return sum(int(byte) << 8 * index for index, byte in enumerate(q))

    def _dqtoi_ipv6(self, dq):
        # Split hextets
        hx = dq.split(':')
        if ':::' in dq:
            raise ValueError("%s: IPv6 address can't contain :::" % dq)
        # Mixed address (or 4-in-6), ::ffff:192.0.2.42
        if '.' in dq:
            return self._dqtoi(hx[-1])
        if len(hx) > 8:
            raise ValueError('%s: IPv6 address with more than 8 hexlets' % dq)
        elif len(hx) < 8:
            # No :: in address
            if '' not in hx:
                raise ValueError('%s: IPv6 address invalid: '
                                 'compressed format malformed' % dq)
            elif not (dq.startswith('::') or dq.endswith('::')) and len([x for x in hx if x == '']) > 1:
                raise ValueError('%s: IPv6 address invalid: '
                                 'compressed format malformed' % dq)
            # Expand the '::' into the right number of zero hexlets.
            ix = hx.index('')
            px = len(hx[ix + 1:])
            for x in range(ix + px + 1, 8):
                hx.insert(ix, '0')
        elif dq.endswith('::'):
            pass
        elif '' in hx:
            # BUG FIX: previously `% dq()` — called the string, raising
            # TypeError instead of this ValueError.
            raise ValueError('%s: IPv6 address invalid: '
                             'compressed format detected in full notation' % dq)
        ip = ''
        hx = [x == '' and '0' or x for x in hx]
        for h in hx:
            if len(h) < 4:
                h = '%04x' % int(h, 16)
            if not 0 <= int(h, 16) <= 0xffff:
                raise ValueError('%r: IPv6 address invalid: '
                                 'hexlets should be between 0x0000 and 0xffff' % dq)
            ip += h
        self.v = 6
        return int(ip, 16)

    def _itodq(self, n):
        """Convert long to dotquad or hextet."""
        if self.v == 4:
            return '.'.join(map(str, [
                (n >> 24) & 0xff,
                (n >> 16) & 0xff,
                (n >> 8) & 0xff,
                n & 0xff,
            ]))
        else:
            n = '%032x' % n
            return ':'.join(n[4 * x:4 * x + 4] for x in range(0, 8))

    def __str__(self):
        """Return dotquad representation of the IP.

        >>> ip = IP("::1")
        >>> print(str(ip))
        0000:0000:0000:0000:0000:0000:0000:0001
        """
        return self.dq

    def __repr__(self):
        """Return canonical representation of the IP.

        >>> repr(IP("::1"))
        "IP('::1')"
        >>> repr(IP("fe80:0000:0000:0000:abde:3eff:ffab:0012/64"))
        "IP('fe80::abde:3eff:ffab:12/64')"
        >>> repr(IP("1.2.3.4/29"))
        "IP('1.2.3.4/29')"
        >>> repr(IP("127.0.0.1/8"))
        "IP('127.0.0.1/8')"
        """
        dq = self.dq if self.v == 4 else self.to_compressed()
        args = (self.__class__.__name__, dq, self.mask)
        # Omit the mask for single-host addresses.
        if (self.version(), self.mask) in [(4, 32), (6, 128)]:
            fmt = "{0}('{1}')"
        else:
            fmt = "{0}('{1}/{2}')"
        return fmt.format(*args)

    def __hash__(self):
        """Hash for collection operations and py:`hash()`."""
        return hash(self.to_tuple())

    hash = __hash__

    def __int__(self):
        """Convert to int."""
        return int(self.ip)

    def __long__(self):
        """Convert to long."""
        return self.ip

    def __lt__(self, other):
        """Less than other test."""
        return int(self) < int(IP(other))

    def __le__(self, other):
        """Less than or equal to other test."""
        return int(self) <= int(IP(other))

    def __ge__(self, other):
        """Greater than or equal to other test."""
        return int(self) >= int(IP(other))

    def __gt__(self, other):
        """Greater than other."""
        return int(self) > int(IP(other))

    def __eq__(self, other):
        """Test if other is address is equal to the current address."""
        return int(self) == int(IP(other))

    def __add__(self, offset):
        """Add numeric offset to the IP."""
        if not isinstance(offset, six.integer_types):
            # BUG FIX: previously `return ValueError(...)`, which handed the
            # caller an exception instance instead of raising it.
            raise ValueError('Value is not numeric')
        return self.__class__(self.ip + offset, mask=self.mask, version=self.v)

    def __sub__(self, offset):
        """Substract numeric offset from the IP."""
        if not isinstance(offset, six.integer_types):
            # BUG FIX: raise instead of returning the exception instance.
            raise ValueError('Value is not numeric')
        return self.__class__(self.ip - offset, mask=self.mask, version=self.v)

    def size(self):
        """Return network size."""
        return 1

    def clone(self):
        """
        Return a new <IP> object with a copy of this one.

        >>> ip = IP('127.0.0.1')
        >>> ip2 = ip.clone()
        >>> ip2
        IP('127.0.0.1')
        >>> ip is ip2
        False
        >>> ip == ip2
        True
        >>> ip.mask = 24
        >>> ip2.mask
        32
        """
        return IP(self)

    def to_compressed(self):
        """
        Compress an IP address to its shortest possible compressed form.

        >>> print(IP('127.0.0.1').to_compressed())
        127.1
        >>> print(IP('127.1.0.1').to_compressed())
        127.1.1
        >>> print(IP('127.0.1.1').to_compressed())
        127.0.1.1
        >>> print(IP('2001:1234:0000:0000:0000:0000:0000:5678').to_compressed())
        2001:1234::5678
        >>> print(IP('1234:0000:0000:beef:0000:0000:0000:5678').to_compressed())
        1234:0:0:beef::5678
        >>> print(IP('0000:0000:0000:0000:0000:0000:0000:0001').to_compressed())
        ::1
        >>> print(IP('fe80:0000:0000:0000:0000:0000:0000:0000').to_compressed())
        fe80::
        """
        if self.v == 4:
            quads = self.dq.split('.')
            try:
                zero = quads.index('0')
                if zero == 1 and quads.index('0', zero + 1):
                    quads.pop(zero)
                    quads.pop(zero)
                    return '.'.join(quads)
                elif zero == 2:
                    quads.pop(zero)
                    return '.'.join(quads)
            except ValueError:  # No zeroes
                pass
            return self.dq
        else:
            quads = map(lambda q: '%x' % (int(q, 16)), self.dq.split(':'))
            quadc = ':%s:' % (':'.join(quads),)
            zeros = [0, -1]
            # Find the largest group of zeros
            for match in re.finditer(r'(:[:0]+)', quadc):
                count = len(match.group(1)) - 1
                if count > zeros[0]:
                    zeros = [count, match.start(1)]
            count, where = zeros
            if count:
                quadc = quadc[:where] + ':' + quadc[where + count:]
            quadc = re.sub(r'((^:)|(:$))', '', quadc)
            quadc = re.sub(r'((^:)|(:$))', '::', quadc)
            return quadc

    def to_ipv4(self):
        """
        Convert (an IPv6) IP address to an IPv4 address, if possible.

        Only works for IPv4-compat (::/96), IPv4-mapped (::ffff/96), and 6-to-4
        (2002::/16) addresses.

        >>> ip = IP('2002:c000:022a::')
        >>> print(ip.to_ipv4())
        192.0.2.42
        """
        if self.v == 4:
            return self
        else:
            if self.bin().startswith('0' * 96):
                return IP(int(self), version=4)
            elif self.bin().startswith('0' * 80 + '1' * 16):
                return IP(int(self) & MAX_IPV4, version=4)
            elif int(self) & BASE_6TO4:
                return IP((int(self) - BASE_6TO4) >> 80, version=4)
            else:
                # BUG FIX: raise instead of returning the exception instance.
                raise ValueError('%s: IPv6 address is not IPv4 compatible or mapped, '
                                 'nor an 6-to-4 IP' % self.dq)

    @classmethod
    def from_bin(cls, value):
        """Initialize a new network from binary notation."""
        # NOTE(review): lstrip('b') only strips leading 'b' characters; a
        # '0b'-prefixed literal is not handled here — confirm expected input.
        value = value.lstrip('b')
        if len(value) == 32:
            return cls(int(value, 2))
        elif len(value) == 128:
            return cls(int(value, 2))
        else:
            # BUG FIX: raise instead of returning the exception instance
            # (mirrors from_hex below).
            raise ValueError('%r: invalid binary notation' % (value,))

    @classmethod
    def from_hex(cls, value):
        """Initialize a new network from hexadecimal notation."""
        if len(value) == 8:
            return cls(int(value, 16))
        elif len(value) == 32:
            return cls(int(value, 16))
        else:
            raise ValueError('%r: invalid hexadecimal notation' % (value,))

    def to_ipv6(self, ip_type='6-to-4'):
        """
        Convert (an IPv4) IP address to an IPv6 address.

        >>> ip = IP('192.0.2.42')
        >>> print(ip.to_ipv6())
        2002:c000:022a:0000:0000:0000:0000:0000

        >>> print(ip.to_ipv6('compat'))
        0000:0000:0000:0000:0000:0000:c000:022a

        >>> print(ip.to_ipv6('mapped'))
        0000:0000:0000:0000:0000:ffff:c000:022a
        """
        assert ip_type in ['6-to-4', 'compat', 'mapped'], 'Conversion ip_type not supported'
        if self.v == 4:
            if ip_type == '6-to-4':
                return IP(BASE_6TO4 | int(self) << 80, version=6)
            elif ip_type == 'compat':
                return IP(int(self), version=6)
            elif ip_type == 'mapped':
                return IP(0xffff << 32 | int(self), version=6)
        else:
            return self

    def to_reverse(self):
        """Convert the IP address to a PTR record.

        Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.

        >>> ip = IP('192.0.2.42')
        >>> print(ip.to_reverse())
        42.2.0.192.in-addr.arpa
        >>> print(ip.to_ipv6().to_reverse())
        0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
        """
        if self.v == 4:
            return '.'.join(list(self.dq.split('.')[::-1]) + ['in-addr', 'arpa'])
        else:
            return '.'.join(list(self.hex())[::-1] + ['ip6', 'arpa'])

    def to_tuple(self):
        """Used for comparisons."""
        return (self.dq, self.mask)
class Network(IP):
"""
Network slice calculations.
:param ip: network address
:type ip: :class:`IP` or str or long or int
:param mask: netmask
:type mask: int or str
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet)
127.0.0.1/8
"""
def netmask(self):
    """
    Network netmask derived from subnet size, as IP object.

    >>> localnet = Network('127.0.0.1/8')
    >>> print(localnet.netmask())
    255.0.0.0
    """
    # Wrap the numeric mask in an IP of the same address family.
    mask_value = self.netmask_long()
    return IP(mask_value, version=self.version())
def netmask_long(self):
    """
    Network netmask derived from subnet size, as long.

    >>> localnet = Network('127.0.0.1/8')
    >>> print(localnet.netmask_long())
    4278190080
    """
    if self.version() == 4:
        host_bits = 32 - self.mask
        return (MAX_IPV4 >> host_bits) << host_bits
    host_bits = 128 - self.mask
    return (MAX_IPV6 >> host_bits) << host_bits
def network(self):
    """
    Network address, as IP object.

    >>> localnet = Network('127.128.99.3/8')
    >>> print(localnet.network())
    127.0.0.0
    """
    base_address = self.network_long()
    return IP(base_address, version=self.version())
def network_long(self):
    """
    Network address, as long.

    >>> localnet = Network('127.128.99.3/8')
    >>> print(localnet.network_long())
    2130706432
    """
    # Clear the host bits by AND-ing with the netmask.
    return self.netmask_long() & self.ip
def broadcast(self):
    """
    Broadcast address, as IP object.

    >>> localnet = Network('127.0.0.1/8')
    >>> print(localnet.broadcast())
    127.255.255.255
    """
    # XXX: IPv6 doesn't have a broadcast address, but it's used for other
    # calculations such as <Network.host_last>
    return IP(self.broadcast_long(), version=self.version())
def broadcast_long(self):
"""
Broadcast address, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.broadcast_long())
2147483647
"""
if self.version() == 4:
return self.network_long() | (MAX_IPV4 - self.netmask_long())
else:
return self.network_long() \
| (MAX_IPV6 - self.netmask_long())
def host_first(self):
"""First available host in this subnet."""
if (self.version() == 4 and self.mask > 30) or \
(self.version() == 6 and self.mask > 126):
return self
else:
return IP(self.network_long() + 1, version=self.version())
def host_last(self):
"""Last available host in this subnet."""
if (self.version() == 4 and self.mask == 32) or \
(self.version() == 6 and self.mask == 128):
return self
elif (self.version() == 4 and self.mask == 31) or \
(self.version() == 6 and self.mask == 127):
return IP(int(self) + 1, version=self.version())
else:
return IP(self.broadcast_long() - 1, version=self.version())
def check_collision(self, other):
"""Check another network against the given network."""
other = Network(other)
return self.network_long() <= other.network_long() <= self.broadcast_long() or \
other.network_long() <= self.network_long() <= other.broadcast_long()
def __str__(self):
"""
Return CIDR representation of the network.
>>> net = Network("::1/64")
>>> print(str(net))
0000:0000:0000:0000:0000:0000:0000:0001/64
"""
return "%s/%d" % (self.dq, self.mask)
def __contains__(self, ip):
"""
Check if the given ip is part of the network.
>>> '192.0.2.42' in Network('192.0.2.0/24')
True
>>> '192.168.2.42' in Network('192.0.2.0/24')
False
"""
return self.check_collision(ip)
def __lt__(self, other):
"""Compare less than."""
return self.size() < IP(other).size()
def __le__(self, other):
"""Compare less than or equal to."""
return self.size() <= IP(other).size()
def __gt__(self, other):
"""Compare greater than."""
return self.size() > IP(other).size()
def __ge__(self, other):
"""Compare greater than or equal to."""
return self.size() >= IP(other).size()
def __eq__(self, other):
"""Compare equal."""
return self.size() == IP(other).size()
def __getitem__(self, key):
"""Get the nth item or slice of the network."""
if isinstance(key, slice):
# Work-around IPv6 subnets being huge. Slice indices don't like
# long int.
x = key.start or 0
slice_stop = (key.stop or self.size()) - 1
slice_step = key.step or 1
arr = list()
while x < slice_stop:
arr.append(IP(int(self) + x))
x += slice_step
return tuple(arr)
else:
return IP(int(self) + key)
def __iter__(self):
"""Generate a range of usable host IP addresses within the network.
>>> for ip in Network('192.168.114.0/30'):
... print(str(ip))
...
192.168.114.1
192.168.114.2
"""
curr = int(self.host_first())
stop = int(self.host_last())
while curr <= stop:
yield IP(curr)
curr += 1
def has_key(self, ip):
"""
Check if the given ip is part of the network.
:param ip: the ip address
:type ip: :class:`IP` or str or long or int
>>> net = Network('192.0.2.0/24')
>>> net.has_key('192.168.2.0')
False
>>> net.has_key('192.0.2.42')
True
"""
return self.__contains__(ip)
def size(self):
"""
Number of ip's within the network.
>>> net = Network('192.0.2.0/24')
>>> print(net.size())
256
"""
return 2 ** ({4: 32, 6: 128}[self.version()] - self.mask)
def cidr(self):
"""
Return CIDR of network.
>>> net = Network('192.0.2.0/22')
>>> print(net.cidr())
22
"""
return self.mask
if __name__ == '__main__':
    # Ad-hoc demo: (address, netmask, candidate addresses for membership
    # tests).  Covers integer masks, dotted-quad and hex masks, and CIDR
    # notation embedded directly in the address string.
    tests = [
        ('192.168.114.42', 23, ['192.168.0.1', '192.168.114.128', '10.0.0.1']),
        ('123::', 128, ['123:456::', '::1', '123::456']),
        ('::42', 64, ['::1', '1::']),
        ('2001:dead:beef:1:c01d:c01a::', 48, ['2001:dead:beef:babe::']),
        ('10.10.0.0', '255.255.255.0', ['10.10.0.20', '10.10.10.20']),
        ('2001:dead:beef:1:c01d:c01a::', 'ffff:ffff:ffff::', ['2001:dead:beef:babe::']),
        ('10.10.0.0/255.255.240.0', None, ['10.10.0.20', '10.10.250.0']),
    ]
    #
    for address, netmask, test_ips in tests:
        net = Network(address, netmask)
        print('===========')
        print('ip address: {0}'.format(net))
        print('to ipv6...: {0}'.format(net.to_ipv6()))
        print('ip version: {0}'.format(net.version()))
        print('ip info...: {0}'.format(net.info()))
        print('subnet....: {0}'.format(net.subnet()))
        print('num ip\'s.. {0}:'.format(net.size()))
        print('integer...: {0}'.format(int(net)))
        print('hex.......: {0}'.format(net.hex()))
        print('netmask...: {0}'.format(net.netmask()))
        # Not implemented in IPv6
        if net.version() == 4:
            print('network...: {0}'.format(net.network()))
            print('broadcast.: {0}'.format(net.broadcast()))
        print('first host: {0}'.format(net.host_first()))
        print('reverse...: {0}'.format(net.host_first().to_reverse()))
        print('last host.: {0}'.format(net.host_last()))
        print('reverse...: {0}'.format(net.host_last().to_reverse()))
        for test_ip in test_ips:
            print('{0} in network: {1}'.format(test_ip, test_ip in net))
| |
"""Support for control of ElkM1 sensors."""
from . import DOMAIN as ELK_DOMAIN, ElkEntity, create_elk_entities
# Home Assistant platform dependency declaration: this sensor platform
# requires the ElkM1 component to be set up first.
DEPENDENCIES = [ELK_DOMAIN]
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Create the Elk-M1 sensor platform."""
    # Only set up via discovery by the ElkM1 component.
    if discovery_info is None:
        return
    elk = hass.data[ELK_DOMAIN]['elk']
    # Build one entity per Elk element, accumulating across element kinds.
    entities = []
    for elements, element_type, element_class in (
            (elk.counters, 'counter', ElkCounter),
            (elk.keypads, 'keypad', ElkKeypad),
            ([elk.panel], 'panel', ElkPanel),
            (elk.settings, 'setting', ElkSetting),
            (elk.zones, 'zone', ElkZone)):
        entities = create_elk_entities(
            hass, elements, element_type, element_class, entities)
    async_add_entities(entities, True)
def temperature_to_state(temperature, undefined_temperature):
    """Map a raw temperature reading to an entity state.

    Readings at or below *undefined_temperature* (the panel's "no reading"
    sentinel) become None.
    """
    if temperature > undefined_temperature:
        return temperature
    return None
class ElkSensor(ElkEntity):
    """Base representation of Elk-M1 sensor."""
    def __init__(self, element, elk, elk_data):
        """Initialize the base of all Elk sensors."""
        super().__init__(element, elk, elk_data)
        # Current sensor state; updated by subclasses' _element_changed().
        self._state = None
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
class ElkCounter(ElkSensor):
    """Representation of an Elk-M1 Counter."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:numeric'
    def _element_changed(self, element, changeset):
        # Mirror the counter's current value into the entity state.
        self._state = self._element.value
class ElkKeypad(ElkSensor):
    """Representation of an Elk-M1 Keypad."""
    @property
    def temperature_unit(self):
        """Return the temperature unit."""
        return self._temperature_unit
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._temperature_unit
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:thermometer-lines'
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.util import username
        attrs = self.initial_attrs()
        # Elk areas and users are 0-based internally; expose them 1-based.
        attrs['area'] = self._element.area + 1
        attrs['temperature'] = self._element.temperature
        attrs['last_user_time'] = self._element.last_user_time.isoformat()
        attrs['last_user'] = self._element.last_user + 1
        attrs['code'] = self._element.code
        attrs['last_user_name'] = username(self._elk, self._element.last_user)
        attrs['last_keypress'] = self._element.last_keypress
        return attrs
    def _element_changed(self, element, changeset):
        # -40 is used here as the "no reading" sentinel for keypad
        # temperature (see temperature_to_state).
        self._state = temperature_to_state(self._element.temperature, -40)
    async def async_added_to_hass(self):
        """Register callback for ElkM1 changes and update entity state."""
        await super().async_added_to_hass()
        # Publish our entity_id so other ElkM1 platforms can locate this
        # keypad by element index.
        self.hass.data[ELK_DOMAIN]['keypads'][
            self._element.index] = self.entity_id
class ElkPanel(ElkSensor):
    """Representation of an Elk-M1 Panel."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return "mdi:home"
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        attrs = self.initial_attrs()
        attrs['system_trouble_status'] = self._element.system_trouble_status
        return attrs
    def _element_changed(self, element, changeset):
        # Panel state reflects the M1XEP link first, then whether remote
        # programming has paused normal operation.
        if not self._elk.is_connected():
            self._state = 'Disconnected'
        elif self._element.remote_programming_status:
            self._state = 'Paused'
        else:
            self._state = 'Connected'
class ElkSetting(ElkSensor):
    """Representation of an Elk-M1 Setting."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:numeric'
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.const import SettingFormat
        attrs = self.initial_attrs()
        value_format = SettingFormat(self._element.value_format)
        attrs['value_format'] = value_format.name.lower()
        return attrs
    def _element_changed(self, element, changeset):
        # Mirror the setting's current value into the entity state.
        self._state = self._element.value
class ElkZone(ElkSensor):
    """Representation of an Elk-M1 Zone."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        from elkm1_lib.const import ZoneType
        # Map the zone's configured definition to an MDI icon name; any
        # definition not listed falls back to 'alarm-bell'.
        zone_icons = {
            ZoneType.FIRE_ALARM.value: 'fire',
            ZoneType.FIRE_VERIFIED.value: 'fire',
            ZoneType.FIRE_SUPERVISORY.value: 'fire',
            ZoneType.KEYFOB.value: 'key',
            ZoneType.NON_ALARM.value: 'alarm-off',
            ZoneType.MEDICAL_ALARM.value: 'medical-bag',
            ZoneType.POLICE_ALARM.value: 'alarm-light',
            ZoneType.POLICE_NO_INDICATION.value: 'alarm-light',
            ZoneType.KEY_MOMENTARY_ARM_DISARM.value: 'power',
            ZoneType.KEY_MOMENTARY_ARM_AWAY.value: 'power',
            ZoneType.KEY_MOMENTARY_ARM_STAY.value: 'power',
            ZoneType.KEY_MOMENTARY_DISARM.value: 'power',
            ZoneType.KEY_ON_OFF.value: 'toggle-switch',
            ZoneType.MUTE_AUDIBLES.value: 'volume-mute',
            ZoneType.POWER_SUPERVISORY.value: 'power-plug',
            ZoneType.TEMPERATURE.value: 'thermometer-lines',
            ZoneType.ANALOG_ZONE.value: 'speedometer',
            ZoneType.PHONE_KEY.value: 'phone-classic',
            ZoneType.INTERCOM_KEY.value: 'deskphone'
        }
        return 'mdi:{}'.format(
            zone_icons.get(self._element.definition, 'alarm-bell'))
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.const import (
            ZoneLogicalStatus, ZonePhysicalStatus, ZoneType)
        attrs = self.initial_attrs()
        attrs['physical_status'] = ZonePhysicalStatus(
            self._element.physical_status).name.lower()
        attrs['logical_status'] = ZoneLogicalStatus(
            self._element.logical_status).name.lower()
        attrs['definition'] = ZoneType(
            self._element.definition).name.lower()
        # Elk areas are 0-based internally; expose them 1-based.
        attrs['area'] = self._element.area + 1
        attrs['bypassed'] = self._element.bypassed
        attrs['triggered_alarm'] = self._element.triggered_alarm
        return attrs
    @property
    def temperature_unit(self):
        """Return the temperature unit."""
        from elkm1_lib.const import ZoneType
        # Only temperature zones report a temperature unit.
        if self._element.definition == ZoneType.TEMPERATURE.value:
            return self._temperature_unit
        return None
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        from elkm1_lib.const import ZoneType
        if self._element.definition == ZoneType.TEMPERATURE.value:
            return self._temperature_unit
        if self._element.definition == ZoneType.ANALOG_ZONE.value:
            return 'V'
        return None
    def _element_changed(self, element, changeset):
        from elkm1_lib.const import ZoneLogicalStatus, ZoneType
        from elkm1_lib.util import pretty_const
        if self._element.definition == ZoneType.TEMPERATURE.value:
            # -60 is used here as the "no reading" sentinel for zone
            # temperature (see temperature_to_state).
            self._state = temperature_to_state(self._element.temperature, -60)
        elif self._element.definition == ZoneType.ANALOG_ZONE.value:
            self._state = self._element.voltage
        else:
            # All other zone types expose their logical status as the state.
            self._state = pretty_const(ZoneLogicalStatus(
                self._element.logical_status).name)
| |
# Copyright (c) 2006-2021 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import re
from pybtex.richtext import Symbol, Text
from pybtex.style.formatting import BaseStyle, toplevel
from pybtex.style.template import (
field, first_of, href, join, names, optional, optional_field, sentence,
tag, together, words
)
def dashify(text):
    """Replace each run of hyphens in *text* with an en-dash symbol."""
    hyphen_run = re.compile(r'-+')
    chunks = text.split(hyphen_run)
    return Text(Symbol('ndash')).join(chunks)
# Page ranges are typeset with en-dashes between the numbers.
pages = field('pages', apply_func=dashify)
# Publication date: "month year", with the month optional.
date = words [optional_field('month'), field('year')]
class Style(BaseStyle):
    """Formatting style modelled on BibTeX's ``unsrt.bst``.

    Each ``get_*_template`` method returns a pybtex template describing
    how one entry type is rendered; the ``format_*`` helpers build the
    shared fragments (names, titles, volume/series, web references, ...).
    """
    def format_names(self, role, as_sentence=True):
        """Name list: "A, B, and C" ("A and B" for exactly two names)."""
        formatted_names = names(role, sep=', ', sep2 = ' and ', last_sep=', and ')
        if as_sentence:
            return sentence [formatted_names]
        else:
            return formatted_names
    def get_article_template(self, e):
        """Template for an @article entry."""
        volume_and_pages = first_of [
            # volume and pages, with optional issue number
            optional [
                join [
                    field('volume'),
                    optional['(', field('number'),')'],
                    ':', pages
                ],
            ],
            # pages only
            words ['pages', pages],
        ]
        template = toplevel [
            self.format_names('author'),
            self.format_title(e, 'title'),
            sentence [
                tag('em') [field('journal')],
                optional[ volume_and_pages ],
                date],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def format_author_or_editor(self, e):
        """Author names, falling back to editor names when absent."""
        return first_of [
            optional[ self.format_names('author') ],
            self.format_editor(e),
        ]
    def format_editor(self, e, as_sentence=True):
        """Editor names followed by the word "editor"/"editors"."""
        editors = self.format_names('editor', as_sentence=False)
        if 'editor' not in e.persons:
            # when parsing the template, a FieldIsMissing exception
            # will be thrown anyway; no need to do anything now,
            # just return the template that will throw the exception
            return editors
        if len(e.persons['editor']) > 1:
            word = 'editors'
        else:
            word = 'editor'
        result = join(sep=', ') [editors, word]
        if as_sentence:
            return sentence [result]
        else:
            return result
    def format_volume_and_series(self, e, as_sentence=True):
        """"Volume X of S" / "Number N in S" / bare series, first available."""
        volume_and_series = optional [
            words [
                together ['Volume' if as_sentence else 'volume', field('volume')], optional [
                    words ['of', field('series')]
                ]
            ]
        ]
        number_and_series = optional [
            words [
                join(sep=Symbol('nbsp')) ['Number' if as_sentence else 'number', field('number')],
                optional [
                    words ['in', field('series')]
                ]
            ]
        ]
        series = optional_field('series')
        result = first_of [
            volume_and_series,
            number_and_series,
            series,
        ]
        if as_sentence:
            return sentence(capfirst=True) [result]
        else:
            return result
    def format_chapter_and_pages(self, e):
        """"chapter C, pages P" with both parts optional."""
        return join(sep=', ') [
            optional [together ['chapter', field('chapter')]],
            optional [together ['pages', pages]],
        ]
    def format_edition(self, e):
        """Lower-cased edition field followed by the word "edition"."""
        return optional [
            words [
                field('edition', apply_func=lambda x: x.lower()),
                'edition',
            ]
        ]
    def format_title(self, e, which_field, as_sentence=True):
        """Plain (capitalized) title."""
        formatted_title = field(
            which_field, apply_func=lambda text: text.capitalize()
        )
        if as_sentence:
            return sentence [ formatted_title ]
        else:
            return formatted_title
    def format_btitle(self, e, which_field, as_sentence=True):
        """Emphasized (book-style) title."""
        formatted_title = tag('em') [ field(which_field) ]
        if as_sentence:
            return sentence[ formatted_title ]
        else:
            return formatted_title
    def format_address_organization_publisher_date(
            self, e, include_organization=True):
        """Format address, organization, publisher, and date.
        Everything is optional, except the date.
        """
        # small difference from unsrt.bst here: unsrt.bst
        # starts a new sentence only if the address is missing;
        # for simplicity here we always start a new sentence
        if include_organization:
            organization = optional_field('organization')
        else:
            organization = None
        return first_of[
            # this will be rendered if there is an address
            optional [
                join(sep=' ') [
                    sentence[
                        field('address'),
                        date,
                    ],
                    sentence[
                        organization,
                        optional_field('publisher'),
                    ],
                ],
            ],
            # if there is no address then we have this
            sentence[
                organization,
                optional_field('publisher'),
                date,
            ],
        ]
    def get_book_template(self, e):
        """Template for a @book entry."""
        template = toplevel [
            self.format_author_or_editor(e),
            self.format_btitle(e, 'title'),
            self.format_volume_and_series(e),
            sentence [
                field('publisher'),
                optional_field('address'),
                self.format_edition(e),
                date
            ],
            optional[ sentence [ self.format_isbn(e) ] ],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def get_booklet_template(self, e):
        """Template for a @booklet entry."""
        template = toplevel [
            self.format_names('author'),
            self.format_title(e, 'title'),
            sentence [
                optional_field('howpublished'),
                optional_field('address'),
                date,
                optional_field('note'),
            ],
            self.format_web_refs(e),
        ]
        return template
    def get_inbook_template(self, e):
        """Template for an @inbook entry."""
        template = toplevel [
            self.format_author_or_editor(e),
            sentence [
                self.format_btitle(e, 'title', as_sentence=False),
                self.format_chapter_and_pages(e),
            ],
            self.format_volume_and_series(e),
            sentence [
                field('publisher'),
                optional_field('address'),
                optional [
                    words [field('edition'), 'edition']
                ],
                date,
                optional_field('note'),
            ],
            self.format_web_refs(e),
        ]
        return template
    def get_incollection_template(self, e):
        """Template for an @incollection entry."""
        template = toplevel [
            sentence [self.format_names('author')],
            self.format_title(e, 'title'),
            words [
                'In',
                sentence [
                    optional[ self.format_editor(e, as_sentence=False) ],
                    self.format_btitle(e, 'booktitle', as_sentence=False),
                    self.format_volume_and_series(e, as_sentence=False),
                    self.format_chapter_and_pages(e),
                ],
            ],
            sentence [
                optional_field('publisher'),
                optional_field('address'),
                self.format_edition(e),
                date,
            ],
            self.format_web_refs(e),
        ]
        return template
    def get_inproceedings_template(self, e):
        """Template for an @inproceedings entry."""
        template = toplevel [
            sentence [self.format_names('author')],
            self.format_title(e, 'title'),
            words [
                'In',
                sentence [
                    optional[ self.format_editor(e, as_sentence=False) ],
                    self.format_btitle(e, 'booktitle', as_sentence=False),
                    self.format_volume_and_series(e, as_sentence=False),
                    optional[ pages ],
                ],
                self.format_address_organization_publisher_date(e),
            ],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def get_manual_template(self, e):
        """Template for a @manual entry."""
        # TODO this only corresponds to the bst style if author is non-empty
        # for empty author we should put the organization first
        template = toplevel [
            optional [ sentence [ self.format_names('author') ] ],
            self.format_btitle(e, 'title'),
            sentence [
                optional_field('organization'),
                optional_field('address'),
                self.format_edition(e),
                optional[ date ],
            ],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def get_mastersthesis_template(self, e):
        """Template for a @mastersthesis entry."""
        template = toplevel [
            sentence [self.format_names('author')],
            self.format_title(e, 'title'),
            sentence[
                "Master's thesis",
                field('school'),
                optional_field('address'),
                date,
            ],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def get_misc_template(self, e):
        """Template for a @misc entry (everything optional)."""
        template = toplevel [
            optional[ sentence [self.format_names('author')] ],
            optional[ self.format_title(e, 'title') ],
            sentence[
                optional[ field('howpublished') ],
                optional[ date ],
            ],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def get_online_template(self, e):
        """Template for an @online entry (rendered like @misc)."""
        return self.get_misc_template(e)
    def get_phdthesis_template(self, e):
        """Template for a @phdthesis entry."""
        template = toplevel [
            sentence [self.format_names('author')],
            self.format_btitle(e, 'title'),
            sentence[
                first_of [
                    optional_field('type'),
                    'PhD thesis',
                ],
                field('school'),
                optional_field('address'),
                date,
            ],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def get_proceedings_template(self, e):
        """Template for a @proceedings entry (editor or organization led)."""
        if 'editor' in e.persons:
            main_part = [
                self.format_editor(e),
                sentence [
                    self.format_btitle(e, 'title', as_sentence=False),
                    self.format_volume_and_series(e, as_sentence=False),
                    self.format_address_organization_publisher_date(e),
                ],
            ]
        else:
            main_part = [
                optional [ sentence [ field('organization') ] ],
                sentence [
                    self.format_btitle(e, 'title', as_sentence=False),
                    self.format_volume_and_series(e, as_sentence=False),
                    self.format_address_organization_publisher_date(
                        e, include_organization=False),
                ],
            ]
        template = toplevel [
            main_part + [
                sentence [ optional_field('note') ],
                self.format_web_refs(e),
            ]
        ]
        return template
    def get_techreport_template(self, e):
        """Template for a @techreport entry."""
        template = toplevel [
            sentence [self.format_names('author')],
            self.format_title(e, 'title'),
            sentence [
                words[
                    first_of [
                        optional_field('type'),
                        'Technical Report',
                    ],
                    optional_field('number'),
                ],
                field('institution'),
                optional_field('address'),
                date,
            ],
            sentence [ optional_field('note') ],
            self.format_web_refs(e),
        ]
        return template
    def get_unpublished_template(self, e):
        """Template for an @unpublished entry (note is mandatory)."""
        template = toplevel [
            sentence [self.format_names('author')],
            self.format_title(e, 'title'),
            sentence [
                field('note'),
                optional[ date ]
            ],
            self.format_web_refs(e),
        ]
        return template
    def format_web_refs(self, e):
        """URL / eprint / PubMed / DOI references, all optional."""
        # based on urlbst output.web.refs
        return sentence [
            optional [ self.format_url(e),
                       optional [ ' (visited on ', field('urldate'), ')' ] ],
            optional [ self.format_eprint(e) ],
            optional [ self.format_pubmed(e) ],
            optional [ self.format_doi(e) ],
        ]
    def format_url(self, e):
        """Hyperlinked "URL: ..." fragment."""
        # based on urlbst format.url
        url = field('url', raw=True)
        return words [
            'URL:',
            href(url) [ url ]
        ]
    def format_pubmed(self, e):
        """Hyperlinked "PMID: ..." fragment."""
        # based on urlbst format.pubmed
        url = join [ 'https://www.ncbi.nlm.nih.gov/pubmed/', field('pubmed', raw=True) ]
        return href(url) [
            join [
                'PMID:',
                field('pubmed', raw=True)
            ]
        ]
    def format_doi(self, e):
        """Hyperlinked "doi: ..." fragment."""
        # based on urlbst format.doi
        url = join [ 'https://doi.org/', field('doi', raw=True) ]
        return href(url) [
            join [
                'doi:',
                field('doi', raw=True)
            ]
        ]
    def format_eprint(self, e):
        """Hyperlinked "arXiv: ..." fragment."""
        # based on urlbst format.eprint
        url = join [ 'https://arxiv.org/abs/', field('eprint', raw=True) ]
        return href(url) [
            join [
                'arXiv:',
                field('eprint', raw=True)
            ]
        ]
    def format_isbn(self, e):
        """"ISBN <number>" fragment."""
        return join(sep=' ') [ 'ISBN', field('isbn') ]
| |
# Author: Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import warnings
from nose.tools import assert_equal, assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_almost_equal,
assert_allclose, assert_array_almost_equal)
from mne.tests.common import assert_dig_allclose
from mne.channels.montage import read_montage, _set_montage, read_dig_montage
from mne.utils import _TempDir, run_tests_if_main
from mne import create_info, EvokedArray, read_evokeds
from mne.coreg import fit_matched_points
from mne.transforms import apply_trans, get_ras_to_neuromag_trans
from mne.io.constants import FIFF
from mne.io.meas_info import _read_dig_points
from mne.io.kit import read_mrk
from mne.io import read_raw_brainvision
from mne.datasets import testing
# Test-data locations: the (optionally present) mne-testing dataset plus
# small fixture files shipped with the mne.io test suites.  data_path may
# point at a missing directory when download=False; tests needing it are
# guarded with @testing.requires_testing_data.
data_path = testing.data_path(download=False)
fif_dig_montage_fname = op.join(data_path, 'montage', 'eeganes07.fif')
evoked_fname = op.join(data_path, 'montage', 'level2_raw-ave.fif')
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
elp = op.join(kit_dir, 'test_elp.txt')
hsp = op.join(kit_dir, 'test_hsp.txt')
hpi = op.join(kit_dir, 'test_mrk.sqd')
bv_fname = op.join(io_dir, 'brainvision', 'tests', 'data', 'test.vhdr')
def test_montage():
    """Test making montages"""
    tempdir = _TempDir()
    # One snippet per supported electrode-position file format; each
    # snippet defines the same three channels.
    # no pep8
    input_str = ["""FidNz 0.00000 10.56381 -2.05108
FidT9 -7.82694 0.45386 -3.76056
FidT10 7.82694 0.45386 -3.76056""",
                 """// MatLab Sphere coordinates [degrees] Cartesian coordinates
// Label Theta Phi Radius X Y Z off sphere surface
E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011
E2 44.600 -0.880 1.000 0.7119 0.7021 -0.0154 0.00000000000000000
E3 51.700 11.000 1.000 0.6084 0.7704 0.1908 0.00000000000000000""",  # noqa
                 """# ASA electrode file
ReferenceLabel avg
UnitPosition mm
NumberPositions= 68
Positions
-86.0761 -19.9897 -47.9860
85.7939 -20.0093 -48.0310
0.0083 86.8110 -39.9830
Labels
LPA
RPA
Nz
""",
                 """Site Theta Phi
Fp1 -92 -72
Fp2 92 72
F3 -60 -51
""",
                 """346
EEG F3 -62.027 -50.053 85
EEG Fz 45.608 90 85
EEG F4 62.01 50.103 85
""",
                 """
eeg Fp1 -95.0 -31.0 -3.0
eeg AF7 -81 -59 -3
eeg AF3 -87 -41 28
"""]
    kinds = ['test.sfp', 'test.csd', 'test.elc', 'test.txt', 'test.elp',
             'test.hpts']
    for kind, text in zip(kinds, input_str):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        montage = read_montage(fname)
        # Every format should yield the same three-channel montage shape.
        assert_equal(len(montage.ch_names), 3)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (3, 3))
        assert_equal(montage.kind, op.splitext(kind)[0])
        if kind.endswith('csd'):
            # Cross-check CSD positions against an independent parse.
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            pos2 = np.c_[table['x'], table['y'], table['z']]
            assert_array_almost_equal(pos2, montage.pos, 4)
    # test transform
    input_str = """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """
    kind = 'test_fid.hpts'
    fname = op.join(tempdir, kind)
    with open(fname, 'w') as fid:
        fid.write(input_str)
    montage = read_montage(op.join(tempdir, 'test_fid.hpts'), transform=True)
    # check coordinate transformation
    pos = np.array([-95.0, -31.0, -3.0])
    nasion = np.array([-91, 0, -42])
    lpa = np.array([0, -91, -42])
    rpa = np.array([0, 91, -42])
    fids = np.vstack((nasion, lpa, rpa))
    trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
    pos = apply_trans(trans, pos)
    assert_array_equal(montage.pos[0], pos)
    # After the transform, the fiducials must lie on the coordinate axes.
    idx = montage.ch_names.index('2')
    assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
    idx = montage.ch_names.index('1')
    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
    idx = montage.ch_names.index('3')
    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
    # Millimetre positions should come out scaled to metres.
    pos = np.array([-95.0, -31.0, -3.0])
    montage_fname = op.join(tempdir, 'test_fid.hpts')
    montage = read_montage(montage_fname, unit='mm')
    assert_array_equal(montage.pos[0], pos * 1e-3)
    # test with last
    info = create_info(montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
    _set_montage(info, montage)
    pos2 = np.array([c['loc'][:3] for c in info['chs']])
    assert_array_equal(pos2, montage.pos)
    assert_equal(montage.ch_names, info['ch_names'])
    info = create_info(
        montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
    evoked = EvokedArray(
        data=np.zeros((len(montage.ch_names), 1)), info=info, tmin=0)
    evoked.set_montage(montage)
    pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
    assert_array_equal(pos3, montage.pos)
    assert_equal(montage.ch_names, evoked.info['ch_names'])
    # Warning should be raised when some EEG are not specified in the montage
    with warnings.catch_warnings(record=True) as w:
        info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                           ['eeg'] * (len(montage.ch_names) + 2))
        _set_montage(info, montage)
        assert_true(len(w) == 1)
def test_read_dig_montage():
    """Test read_dig_montage"""
    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
    montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=False)
    # Without a transform, the montage must echo the raw file contents.
    elp_points = _read_dig_points(elp)
    hsp_points = _read_dig_points(hsp)
    hpi_points = read_mrk(hpi)
    assert_equal(montage.point_names, names)
    assert_array_equal(montage.elp, elp_points)
    assert_array_equal(montage.hsp, hsp_points)
    assert_array_equal(montage.hpi, hpi_points)
    assert_array_equal(montage.dev_head_t, np.identity(4))
    montage = read_dig_montage(hsp, hpi, elp, names,
                               transform=True, dev_head_t=True)
    # check coordinate transformation
    # nasion
    assert_almost_equal(montage.nasion[0], 0)
    assert_almost_equal(montage.nasion[2], 0)
    # lpa and rpa
    assert_allclose(montage.lpa[1:], 0, atol=1e-16)
    assert_allclose(montage.rpa[1:], 0, atol=1e-16)
    # device head transform
    dev_head_t = fit_matched_points(tgt_pts=montage.elp,
                                    src_pts=montage.hpi, out='trans')
    assert_array_equal(montage.dev_head_t, dev_head_t)
def test_set_dig_montage():
    """Test applying DigMontage to inst
    """
    # Extensive testing of applying `dig` to info is done in test_meas_info
    # with `test_make_dig_points`.
    names = ['nasion', 'lpa', 'rpa', '1', '2', '3', '4', '5']
    hsp_points = _read_dig_points(hsp)
    elp_points = _read_dig_points(elp)
    hpi_points = read_mrk(hpi)
    # Manually apply the RAS->neuromag transform so we have the expected
    # transformed coordinates to compare against.
    p0, p1, p2 = elp_points[:3]
    nm_trans = get_ras_to_neuromag_trans(p0, p1, p2)
    elp_points = apply_trans(nm_trans, elp_points)
    nasion_point, lpa_point, rpa_point = elp_points[:3]
    hsp_points = apply_trans(nm_trans, hsp_points)
    montage = read_dig_montage(hsp, hpi, elp, names, unit='m', transform=True)
    info = create_info(['Test Ch'], 1e3, ['eeg'])
    _set_montage(info, montage)
    # Pull each digitization category back out of info['dig'] ...
    hs = np.array([p['r'] for i, p in enumerate(info['dig'])
                   if p['kind'] == FIFF.FIFFV_POINT_EXTRA])
    nasion_dig = np.array([p['r'] for p in info['dig']
                           if all([p['ident'] == FIFF.FIFFV_POINT_NASION,
                                   p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
    lpa_dig = np.array([p['r'] for p in info['dig']
                        if all([p['ident'] == FIFF.FIFFV_POINT_LPA,
                                p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
    rpa_dig = np.array([p['r'] for p in info['dig']
                        if all([p['ident'] == FIFF.FIFFV_POINT_RPA,
                                p['kind'] == FIFF.FIFFV_POINT_CARDINAL])])
    hpi_dig = np.array([p['r'] for p in info['dig']
                        if p['kind'] == FIFF.FIFFV_POINT_HPI])
    # ... and check that they match the manually transformed file contents.
    assert_array_equal(hs, hsp_points)
    assert_array_equal(nasion_dig.ravel(), nasion_point)
    assert_array_equal(lpa_dig.ravel(), lpa_point)
    assert_array_equal(rpa_dig.ravel(), rpa_point)
    assert_array_equal(hpi_dig, hpi_points)
    assert_array_equal(montage.dev_head_t, info['dev_head_t']['trans'])
@testing.requires_testing_data
def test_fif_dig_montage():
    """Test FIF dig montage support"""
    dig_montage = read_dig_montage(fif=fif_dig_montage_fname)
    # Make a BrainVision file like the one the user would have had
    raw_bv = read_raw_brainvision(bv_fname, preload=True)
    raw_bv_2 = raw_bv.copy()
    # Rename the two copies into two disjoint EEGxxx channel ranges so the
    # concatenated raw covers all montage channels.
    mapping = dict()
    for ii, ch_name in enumerate(raw_bv.ch_names[:-1]):
        mapping[ch_name] = 'EEG%03d' % (ii + 1,)
    raw_bv.rename_channels(mapping)
    for ii, ch_name in enumerate(raw_bv_2.ch_names[:-1]):
        mapping[ch_name] = 'EEG%03d' % (ii + 33,)
    raw_bv_2.rename_channels(mapping)
    raw_bv.drop_channels(['STI 014'])
    raw_bv.add_channels([raw_bv_2])
    # Set the montage
    raw_bv.set_montage(dig_montage)
    # Check the result
    evoked = read_evokeds(evoked_fname)[0]
    assert_equal(len(raw_bv.ch_names), len(evoked.ch_names))
    for ch_py, ch_c in zip(raw_bv.info['chs'], evoked.info['chs']):
        assert_equal(ch_py['ch_name'], ch_c['ch_name'].replace('EEG ', 'EEG'))
        # C actually says it's unknown, but it's not (?):
        # assert_equal(ch_py['coord_frame'], ch_c['coord_frame'])
        assert_equal(ch_py['coord_frame'], FIFF.FIFFV_COORD_HEAD)
        assert_allclose(ch_py['loc'], ch_c['loc'])
    assert_dig_allclose(raw_bv.info, evoked.info)
# Run this module's tests when executed as a script.
run_tests_if_main()
| |
# Copyright (c) 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
from urllib import quote, unquote
from xml.sax import saxutils
from time import time
from swift.common.swob import Request, HTTPBadGateway, \
HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
HTTPLengthRequired, HTTPException, HTTPServerError, wsgify
from swift.common.utils import json, get_logger
from swift.common.constraints import check_utf8, MAX_FILE_SIZE
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
from swift.common.constraints import MAX_OBJECT_NAME_LENGTH, \
MAX_CONTAINER_NAME_LENGTH
# Longest legal object path: '/container/object' -- container name plus
# object name plus the two separating slashes.
MAX_PATH_LENGTH = MAX_OBJECT_NAME_LENGTH + MAX_CONTAINER_NAME_LENGTH + 2
class CreateContainerError(Exception):
    """Raised when a container could not be auto-created.

    Carries the failing response's integer status code and status line so
    callers can relay them.
    """
    def __init__(self, msg, status_int, status):
        super(CreateContainerError, self).__init__(msg)
        self.status_int = status_int
        self.status = status
# Response content types this middleware knows how to produce.
ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',
                      'text/xml']
def get_response_body(data_format, data_dict, error_list):
    """
    Returns a properly formatted response body according to format. Handles
    json and xml, otherwise will return text/plain. Note: xml response does not
    include xml declaration.
    :params data_format: resulting format
    :params data_dict: generated data about results.
    :params error_list: list of quoted filenames that failed
    :returns: the rendered response body as a string
    """
    if data_format == 'application/json':
        # Merge into a copy rather than mutating the caller's dict in place;
        # callers may still hold a reference to data_dict.
        body_dict = dict(data_dict)
        body_dict['Errors'] = error_list
        return json.dumps(body_dict)
    if data_format and data_format.endswith('/xml'):
        output = '<delete>\n'
        for key in sorted(data_dict.keys()):
            # XML element names cannot contain spaces ("Number Deleted" ->
            # "number_deleted").
            xml_key = key.replace(' ', '_').lower()
            output += '<%s>%s</%s>\n' % (xml_key, data_dict[key], xml_key)
        output += '<errors>\n'
        # Object names are escaped; they may contain &, <, > characters.
        output += '\n'.join(
            ['<object>'
             '<name>%s</name><status>%s</status>'
             '</object>' % (saxutils.escape(name), status) for
             name, status in error_list])
        output += '</errors>\n</delete>\n'
        return output
    # Fallback: plain-text "key: value" lines followed by the error list.
    output = ''
    for key in sorted(data_dict.keys()):
        output += '%s: %s\n' % (key, data_dict[key])
    output += 'Errors:\n'
    output += '\n'.join(
        ['%s, %s' % (name, status)
         for name, status in error_list])
    return output
class Bulk(object):
    """
    Middleware that will do many operations on a single request.
    Extract Archive:
    Expand tar files into a swift account. Request must be a PUT with the
    query parameter ?extract-archive=format specifying the format of archive
    file. Accepted formats are tar, tar.gz, and tar.bz2.
    For a PUT to the following url:
    /v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz
    UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
    container, a pseudo-directory within a container, or an empty string. The
    destination of a file in the archive will be built as follows:
    /v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
    Where FILE_PATH is the file name from the listing in the tar file.
    If the UPLOAD_PATH is an empty string, containers will be auto created
    accordingly and files in the tar that would not map to any container (files
    in the base directory) will be ignored.
    Only regular files will be uploaded. Empty directories, symlinks, etc will
    not be uploaded.
    The response from bulk operations functions differently from other swift
    responses. This is because a short request body sent from the client could
    result in many operations on the proxy server and precautions need to be
    made to prevent the request from timing out due to lack of activity. To
    this end, the client will always receive a 200 OK response, regardless of
    the actual success of the call. The body of the response must be parsed to
    determine the actual success of the operation. In addition to this the
    client may receive zero or more whitespace characters prepended to the
    actual response body while the proxy server is completing the request.
    The format of the response body defaults to text/plain but can be either
    json or xml depending on the Accept header. Acceptable formats are
    text/plain, application/json, application/xml, and text/xml. An example
    body is as follows:
    {"Response Status": "201 Created",
    "Response Body": "",
    "Errors": [],
    "Number Files Created": 10}
    If all valid files were uploaded successfully the Response Status will be
    201 Created. If any files failed to be created the response code
    corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
    server errors), etc. In both cases the response body will specify the
    number of files successfully uploaded and a list of the files that failed.
    There are proxy logs created for each file (which becomes a subrequest) in
    the tar. The subrequest's proxy log will have a swift.source set to "EA"
    the log's content length will reflect the unzipped size of the file. If
    double proxy-logging is used the leftmost logger will not have a
    swift.source set and the content length will reflect the size of the
    payload sent to the proxy (the unexpanded size of the tar.gz).
    Bulk Delete:
    Will delete multiple objects or containers from their account with a
    single request. Responds to DELETE requests with query parameter
    ?bulk-delete set. The Content-Type should be set to text/plain.
    The body of the DELETE request will be a newline separated list of url
    encoded objects to delete. You can delete 10,000 (configurable) objects
    per request. The objects specified in the DELETE request body must be URL
    encoded and in the form:
    /container_name/obj_name
    or for a container (which must be empty at time of delete)
    /container_name
    The response is similar to bulk deletes as in every response will be a 200
    OK and you must parse the response body for actual results. An example
    response is:
    {"Number Not Found": 0,
    "Response Status": "200 OK",
    "Response Body": "",
    "Errors": [],
    "Number Deleted": 6}
    If all items were successfully deleted (or did not exist), the Response
    Status will be 200 OK. If any failed to delete, the response code
    corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
    server errors), etc. In all cases the response body will specify the number
    of items successfully deleted, not found, and a list of those that failed.
    The return body will be formatted in the way specified in the request's
    Accept header. Acceptable formats are text/plain, application/json,
    application/xml, and text/xml.
    There are proxy logs created for each object or container (which becomes a
    subrequest) that is deleted. The subrequest's proxy log will have a
    swift.source set to "BD" the log's content length of 0. If double
    proxy-logging is used the leftmost logger will not have a
    swift.source set and the content length will reflect the size of the
    payload sent to the proxy (the list of objects/containers to be deleted).
    """
    def __init__(self, app, conf):
        self.app = app
        self.logger = get_logger(conf, log_route='bulk')
        # Cap on distinct containers auto-created by one extraction request.
        self.max_containers = int(
            conf.get('max_containers_per_extraction', 10000))
        # Extraction aborts once this many per-file failures accumulate.
        self.max_failed_extractions = int(
            conf.get('max_failed_extractions', 1000))
        self.max_deletes_per_request = int(
            conf.get('max_deletes_per_request', 10000))
        # Seconds between keep-alive whitespace yields while a bulk request
        # is still being processed (prevents client/proxy idle timeouts).
        self.yield_frequency = int(conf.get('yield_frequency', 60))
    def create_container(self, req, container_path):
        """
        Makes a subrequest to create a new container.
        :params container_path: an unquoted path to a container to be created
        :returns: None on success
        :raises: CreateContainerError on creation error
        """
        # Reuse the incoming request's environ (auth, transaction id) but
        # point it at the container path.
        new_env = req.environ.copy()
        new_env['PATH_INFO'] = container_path
        new_env['swift.source'] = 'EA'
        create_cont_req = Request.blank(container_path, environ=new_env)
        resp = create_cont_req.get_response(self.app)
        # Anything outside the 2xx family is a failure (202 "already
        # exists" counts as success).
        if resp.status_int // 100 != 2:
            raise CreateContainerError(
                "Create Container Failed: " + container_path,
                resp.status_int, resp.status)
    def get_objs_to_delete(self, req):
        """
        Will populate objs_to_delete with data from request input.
        :params req: a Swob request
        :returns: a list of the contents of req.body when separated by newline.
        :raises: HTTPException on failures
        """
        line = ''
        data_remaining = True
        objs_to_delete = []
        # Require either an explicit Content-Length or chunked encoding so
        # we know when the body ends.
        if req.content_length is None and \
                req.headers.get('transfer-encoding', '').lower() != 'chunked':
            raise HTTPLengthRequired(request=req)
        # Stream the body in MAX_PATH_LENGTH-sized chunks, splitting out one
        # newline-terminated name at a time; avoids buffering the whole body.
        while data_remaining:
            if '\n' in line:
                obj_to_delete, line = line.split('\n', 1)
                objs_to_delete.append(unquote(obj_to_delete))
            else:
                data = req.body_file.read(MAX_PATH_LENGTH)
                if data:
                    line += data
                else:
                    data_remaining = False
                    if line.strip():
                        # Final name may lack a trailing newline.
                        objs_to_delete.append(unquote(line))
            if len(objs_to_delete) > self.max_deletes_per_request:
                raise HTTPRequestEntityTooLarge(
                    'Maximum Bulk Deletes: %d per request' %
                    self.max_deletes_per_request)
            # A "line" that never sees a newline within twice the max path
            # length cannot be a valid name.
            if len(line) > MAX_PATH_LENGTH * 2:
                raise HTTPBadRequest('Invalid File Name')
        return objs_to_delete
    def handle_delete_iter(self, req, objs_to_delete=None,
                           user_agent='BulkDelete', swift_source='BD',
                           out_content_type='text/plain'):
        """
        A generator that can be assigned to a swob Response's app_iter which,
        when iterated over, will delete the objects specified in request body.
        Will occasionally yield whitespace while request is being processed.
        When the request is completed will yield a response body that can be
        parsed to determine success. See above documentation for details.
        :params req: a swob Request
        :params objs_to_delete: a list of dictionaries that specifies the
        objects to be deleted. If None, uses self.get_objs_to_delete to
        query request.
        """
        last_yield = time()
        # Becomes '\r\n\r\n' once keep-alive whitespace has been sent, so the
        # real body is visibly separated from the padding.
        separator = ''
        failed_files = []
        resp_dict = {'Response Status': HTTPOk().status,
                     'Response Body': '',
                     'Number Deleted': 0,
                     'Number Not Found': 0}
        try:
            if not out_content_type:
                raise HTTPNotAcceptable(request=req)
            if out_content_type.endswith('/xml'):
                yield '<?xml version="1.0" encoding="UTF-8"?>\n'
            try:
                vrs, account, _junk = req.split_path(2, 3, True)
            except ValueError:
                raise HTTPNotFound(request=req)
            incoming_format = req.headers.get('Content-Type')
            if incoming_format and \
                    not incoming_format.startswith('text/plain'):
                # For now only accept newline separated object names
                raise HTTPNotAcceptable(request=req)
            if objs_to_delete is None:
                objs_to_delete = self.get_objs_to_delete(req)
            failed_file_response_type = HTTPBadRequest
            # Disable eventlet write buffering so keep-alive bytes actually
            # reach the client.
            req.environ['eventlet.minimum_write_chunk_size'] = 0
            for obj_to_delete in objs_to_delete:
                # Yield a space periodically to keep the connection alive.
                if last_yield + self.yield_frequency < time():
                    separator = '\r\n\r\n'
                    last_yield = time()
                    yield ' '
                obj_to_delete = obj_to_delete.strip()
                if not obj_to_delete:
                    continue
                delete_path = '/'.join(['', vrs, account,
                                        obj_to_delete.lstrip('/')])
                if not check_utf8(delete_path):
                    failed_files.append([quote(obj_to_delete),
                                         HTTPPreconditionFailed().status])
                    continue
                # Build a DELETE subrequest sharing the caller's environ.
                new_env = req.environ.copy()
                new_env['PATH_INFO'] = delete_path
                del(new_env['wsgi.input'])
                new_env['CONTENT_LENGTH'] = 0
                new_env['HTTP_USER_AGENT'] = \
                    '%s %s' % (req.environ.get('HTTP_USER_AGENT'), user_agent)
                new_env['swift.source'] = swift_source
                delete_obj_req = Request.blank(delete_path, new_env)
                resp = delete_obj_req.get_response(self.app)
                if resp.status_int // 100 == 2:
                    resp_dict['Number Deleted'] += 1
                elif resp.status_int == HTTP_NOT_FOUND:
                    # Already gone counts as success for bulk delete.
                    resp_dict['Number Not Found'] += 1
                elif resp.status_int == HTTP_UNAUTHORIZED:
                    # No point continuing; remaining deletes would 401 too.
                    failed_files.append([quote(obj_to_delete),
                                         HTTPUnauthorized().status])
                    raise HTTPUnauthorized(request=req)
                else:
                    if resp.status_int // 100 == 5:
                        failed_file_response_type = HTTPBadGateway
                    failed_files.append([quote(obj_to_delete), resp.status])
            if failed_files:
                resp_dict['Response Status'] = \
                    failed_file_response_type().status
            elif not (resp_dict['Number Deleted'] or
                      resp_dict['Number Not Found']):
                # Body contained nothing deletable at all.
                resp_dict['Response Status'] = HTTPBadRequest().status
                resp_dict['Response Body'] = 'Invalid bulk delete.'
        except HTTPException, err:
            resp_dict['Response Status'] = err.status
            resp_dict['Response Body'] = err.body
        except Exception:
            self.logger.exception('Error in bulk delete.')
            resp_dict['Response Status'] = HTTPServerError().status
        yield separator + get_response_body(out_content_type,
                                            resp_dict, failed_files)
    def handle_extract_iter(self, req, compress_type,
                            out_content_type='text/plain'):
        """
        A generator that can be assigned to a swob Response's app_iter which,
        when iterated over, will extract and PUT the objects pulled from the
        request body. Will occasionally yield whitespace while request is being
        processed. When the request is completed will yield a response body
        that can be parsed to determine success. See above documentation for
        details.
        :params req: a swob Request
        :params compress_type: specifying the compression type of the tar.
        Accepts '', 'gz', or 'bz2'
        """
        resp_dict = {'Response Status': HTTPCreated().status,
                     'Response Body': '', 'Number Files Created': 0}
        failed_files = []
        last_yield = time()
        separator = ''
        # Containers already confirmed/created; avoids a subrequest per file.
        existing_containers = set()
        try:
            if not out_content_type:
                raise HTTPNotAcceptable(request=req)
            if out_content_type.endswith('/xml'):
                yield '<?xml version="1.0" encoding="UTF-8"?>\n'
            if req.content_length is None and \
                    req.headers.get('transfer-encoding',
                                    '').lower() != 'chunked':
                raise HTTPLengthRequired(request=req)
            try:
                vrs, account, extract_base = req.split_path(2, 3, True)
            except ValueError:
                raise HTTPNotFound(request=req)
            extract_base = extract_base or ''
            extract_base = extract_base.rstrip('/')
            # 'r|' opens the tar in non-seekable stream mode, required since
            # the archive arrives over the wire.
            tar = tarfile.open(mode='r|' + compress_type,
                               fileobj=req.body_file)
            failed_response_type = HTTPBadRequest
            req.environ['eventlet.minimum_write_chunk_size'] = 0
            while True:
                # Keep-alive whitespace, as in handle_delete_iter.
                if last_yield + self.yield_frequency < time():
                    separator = '\r\n\r\n'
                    last_yield = time()
                    yield ' '
                tar_info = tar.next()
                if tar_info is None or \
                        len(failed_files) >= self.max_failed_extractions:
                    break
                # Only regular files are uploaded (no dirs, symlinks, etc).
                if tar_info.isfile():
                    obj_path = tar_info.name
                    if obj_path.startswith('./'):
                        obj_path = obj_path[2:]
                    obj_path = obj_path.lstrip('/')
                    if extract_base:
                        obj_path = extract_base + '/' + obj_path
                    if '/' not in obj_path:
                        continue  # ignore base level file
                    destination = '/'.join(
                        ['', vrs, account, obj_path])
                    container = obj_path.split('/', 1)[0]
                    if not check_utf8(destination):
                        failed_files.append(
                            [quote(obj_path[:MAX_PATH_LENGTH]),
                             HTTPPreconditionFailed().status])
                        continue
                    if tar_info.size > MAX_FILE_SIZE:
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]),
                            HTTPRequestEntityTooLarge().status])
                        continue
                    if container not in existing_containers:
                        try:
                            self.create_container(
                                req, '/'.join(['', vrs, account, container]))
                            existing_containers.add(container)
                        except CreateContainerError, err:
                            failed_files.append([
                                quote(obj_path[:MAX_PATH_LENGTH]),
                                err.status])
                            if err.status_int == HTTP_UNAUTHORIZED:
                                raise HTTPUnauthorized(request=req)
                            continue
                        except ValueError:
                            failed_files.append([
                                quote(obj_path[:MAX_PATH_LENGTH]),
                                HTTPBadRequest().status])
                            continue
                        if len(existing_containers) > self.max_containers:
                            raise HTTPBadRequest(
                                'More than %d base level containers in tar.' %
                                self.max_containers)
                    # PUT the file contents straight from the tar stream.
                    tar_file = tar.extractfile(tar_info)
                    new_env = req.environ.copy()
                    new_env['wsgi.input'] = tar_file
                    new_env['PATH_INFO'] = destination
                    new_env['CONTENT_LENGTH'] = tar_info.size
                    new_env['swift.source'] = 'EA'
                    new_env['HTTP_USER_AGENT'] = \
                        '%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
                    create_obj_req = Request.blank(destination, new_env)
                    resp = create_obj_req.get_response(self.app)
                    if resp.status_int // 100 == 2:
                        resp_dict['Number Files Created'] += 1
                    else:
                        if resp.status_int == HTTP_UNAUTHORIZED:
                            failed_files.append([
                                quote(obj_path[:MAX_PATH_LENGTH]),
                                HTTPUnauthorized().status])
                            raise HTTPUnauthorized(request=req)
                        if resp.status_int // 100 == 5:
                            failed_response_type = HTTPBadGateway
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]), resp.status])
            if failed_files:
                resp_dict['Response Status'] = failed_response_type().status
            elif not resp_dict['Number Files Created']:
                resp_dict['Response Status'] = HTTPBadRequest().status
                resp_dict['Response Body'] = 'Invalid Tar File: No Valid Files'
        except HTTPException, err:
            resp_dict['Response Status'] = err.status
            resp_dict['Response Body'] = err.body
        except tarfile.TarError, tar_error:
            resp_dict['Response Status'] = HTTPBadRequest().status
            resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error
        except Exception:
            self.logger.exception('Error in extract archive.')
            resp_dict['Response Status'] = HTTPServerError().status
        yield separator + get_response_body(
            out_content_type, resp_dict, failed_files)
    @wsgify
    def __call__(self, req):
        # Route PUT ?extract-archive=... and DELETE ?bulk-delete here; pass
        # everything else straight through to the wrapped app.
        extract_type = req.params.get('extract-archive')
        resp = None
        if extract_type is not None and req.method == 'PUT':
            # Map the format suffix to tarfile's compression mode suffix.
            archive_type = {
                'tar': '', 'tar.gz': 'gz',
                'tar.bz2': 'bz2'}.get(extract_type.lower().strip('.'))
            if archive_type is not None:
                resp = HTTPOk(request=req)
                out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
                if out_content_type:
                    resp.content_type = out_content_type
                resp.app_iter = self.handle_extract_iter(
                    req, archive_type, out_content_type=out_content_type)
            else:
                resp = HTTPBadRequest("Unsupported archive format")
        if 'bulk-delete' in req.params and req.method == 'DELETE':
            resp = HTTPOk(request=req)
            out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
            if out_content_type:
                resp.content_type = out_content_type
            resp.app_iter = self.handle_delete_iter(
                req, out_content_type=out_content_type)
        return resp or self.app
def filter_factory(global_conf, **local_conf):
    """Paste-deploy entry point: build a filter that wraps an app in Bulk.

    Local (per-filter) settings override the global paste configuration.
    """
    settings = dict(global_conf)
    settings.update(local_conf)
    def bulk_filter(app):
        return Bulk(app, settings)
    return bulk_filter
| |
#!/usr/bin/env python
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import (print_function, unicode_literals)
import sys, os
import json
import argparse
import re
from collections import OrderedDict
import dxpy
from dxpy.templating.utils import (print_intro, get_name, get_version, get_metadata, Completer, get_ordinal_str,
prompt_for_var, prompt_for_yn, use_completer, get_language, language_options,
get_pattern, get_timeout, fill_in_name_and_ver, clean, create_files_from_templates)
from dxpy.utils.printing import fill, BOLD, UNDERLINE, DNANEXUS_LOGO, ENDC
from dxpy.app_categories import APP_CATEGORIES
from dxpy.utils.completer import InstanceTypesCompleter
from dxpy.utils.pretty_print import format_table
from dxpy.compat import wrap_stdio_in_codecs
# Make stdout/stderr able to emit non-ASCII text regardless of terminal.
wrap_stdio_in_codecs()
# colorama enables ANSI colors on Windows consoles; it is optional, so any
# import/init failure is deliberately ignored.
try:
    import colorama
    colorama.init()
except:
    pass
# Valid app input/output parameter names: identifier-style, no leading digit.
IO_NAME_PATTERN = re.compile('^[a-zA-Z_][0-9a-zA-Z_]*$')
# Default deployment regions keyed by cloud provider.
DEFAULT_REGION_AWS = 'aws:us-east-1'
DEFAULT_REGION_AZURE = 'azure:westus'
API_VERSION = '1.0.0'
# Command line is parsed at import time so main() can read module-level args.
parser = argparse.ArgumentParser(description="Create a source code directory for a DNAnexus app. You will be prompted for various metadata for the app as well as for its input and output specifications.")
parser.add_argument('--json-file', help='Use the metadata and IO spec found in the given file')
parser.add_argument('--language', help='Programming language of your app')
parser.add_argument('--template',
                    choices=["basic", "parallelized", "scatter-process-gather"], default='basic',
                    help='Execution pattern of your app')
parser.add_argument('name', help='Name of your app', nargs='?')
args = parser.parse_args()
if args.json_file is not None and not os.path.exists(args.json_file):
    parser.error('File not found: ' + args.json_file)
def main(**kwargs):
    """
    Entry point for dx-app-wizard.
    Note that this function is not meant to be used as a subroutine in your program.

    Interactively collects app metadata, input/output specs, timeout,
    language/template, access permissions and system requirements, then writes
    a dxapp.json plus template source files into a new app directory named
    after the app. Reads the module-level ``args`` parsed at import time.
    """
    # Paths of every file/directory created, printed at the end.
    manifest = []
    print_intro(API_VERSION)
    if args.json_file is not None:
        # Seed metadata and IO spec from an existing dxapp.json-style file.
        with open(args.json_file, 'r') as json_file:
            app_json = json.loads(json_file.read())
        # Re-confirm the name
        name = get_name(default=args.name or app_json.get('name'))
        app_json['name'] = name
        version = get_version(default=app_json.get('version'))
        app_json['version'] = version
        try:
            os.mkdir(app_json['name'])
        except:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % app_json['name']) + '\n')
            sys.exit(1)
    else:
        ##################
        # BASIC METADATA #
        ##################
        name = get_name(default=args.name)
        try:
            os.mkdir(name)
        except:
            sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % name) + '\n')
            sys.exit(1)
        title, summary = get_metadata(API_VERSION)
        version = get_version()
        app_json = OrderedDict()
        app_json["name"] = name
        app_json["title"] = title or name
        app_json['summary'] = summary or name
        app_json["dxapi"] = API_VERSION
        app_json["version"] = version
    ############
    # IO SPECS #
    ############
    # Tab-completion choices for parameter classes and booleans.
    class_completer = Completer(['int', 'float', 'string', 'boolean', 'hash',
                                 'array:int', 'array:float', 'array:string', 'array:boolean',
                                 'record', 'file', 'applet',
                                 'array:record', 'array:file', 'array:applet'])
    bool_completer = Completer(['true', 'false'])
    print('')
    print(BOLD() + 'Input Specification' + ENDC())
    print('')
    input_spec = True
    input_names = []
    # The class listing is printed only once, shared by input/output prompts.
    printed_classes = False
    if input_spec:
        app_json['inputSpec'] = []
        print(fill('You will now be prompted for each input parameter to your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
        # One iteration per input parameter; empty name ends the loop.
        while True:
            print('')
            ordinal = get_ordinal_str(len(app_json['inputSpec']) + 1)
            input_name = prompt_for_var(ordinal + ' input name (<ENTER> to finish)', allow_empty=True)
            if input_name == '':
                break
            if input_name in input_names:
                print(fill('Error: Cannot use the same input parameter name twice. Please choose again.'))
                continue
            if not IO_NAME_PATTERN.match(input_name):
                print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
                continue
            input_names.append(input_name)
            input_label = prompt_for_var('Label (optional human-readable name)', '')
            use_completer(class_completer)
            if not printed_classes:
                print('Your input parameter must be of one of the following classes:')
                print('''applet array:file array:record file int
array:applet array:float array:string float record
array:boolean array:int boolean hash string
''')
                printed_classes = True
            # Re-prompt until a recognized class is entered.
            while True:
                input_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                if input_class in class_completer.choices:
                    break
                else:
                    print(fill('Not a recognized class; please choose again.'))
            use_completer()
            optional = prompt_for_yn('This is an optional parameter')
            default_val = None
            # Defaults are only offered for optional primitive-typed params.
            if optional and input_class in ['int', 'float', 'string', 'boolean']:
                default_val = prompt_for_yn('A default value should be provided')
                if default_val:
                    # Re-prompt until the default parses as the chosen class.
                    while True:
                        if input_class == 'boolean':
                            use_completer(bool_completer)
                            default_val = prompt_for_var(' Default value', choices=['true', 'false'])
                            use_completer()
                        elif input_class == 'string':
                            default_val = prompt_for_var(' Default value', allow_empty=True)
                        else:
                            default_val = prompt_for_var(' Default value')
                        try:
                            if input_class == 'boolean':
                                default_val = (default_val == 'true')
                            elif input_class == 'int':
                                default_val = int(default_val)
                            elif input_class == 'float':
                                default_val = float(default_val)
                            break
                        except:
                            print('Not a valid default value for the given class ' + input_class)
                else:
                    default_val = None
            # Fill in the input parameter's JSON
            parameter_json = OrderedDict()
            parameter_json["name"] = input_name
            if input_label != '':
                parameter_json['label'] = input_label
            parameter_json["class"] = input_class
            parameter_json["optional"] = optional
            if default_val is not None:
                parameter_json['default'] = default_val
            # Fill in patterns and blank help string
            if input_class == 'file' or input_class == 'array:file':
                parameter_json["patterns"] = ["*"]
            parameter_json["help"] = ""
            app_json['inputSpec'].append(parameter_json)
    print('')
    print(BOLD() + 'Output Specification' + ENDC())
    print('')
    output_spec = True
    output_names = []
    if output_spec:
        app_json['outputSpec'] = []
        print(fill('You will now be prompted for each output parameter of your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
        # Mirrors the input-spec loop above (no defaults for outputs).
        while True:
            print('')
            ordinal = get_ordinal_str(len(app_json['outputSpec']) + 1)
            output_name = prompt_for_var(ordinal + ' output name (<ENTER> to finish)', allow_empty=True)
            if output_name == '':
                break
            if output_name in output_names:
                print(fill('Error: Cannot use the same output parameter name twice. Please choose again.'))
                continue
            if not IO_NAME_PATTERN.match(output_name):
                print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
                continue
            output_names.append(output_name)
            output_label = prompt_for_var('Label (optional human-readable name)', '')
            use_completer(class_completer)
            if not printed_classes:
                print('Your output parameter must be of one of the following classes:')
                print('''applet array:file array:record file int
array:applet array:float array:string float record
array:boolean array:int boolean hash string''')
                printed_classes = True
            while True:
                output_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
                if output_class in class_completer.choices:
                    break
                else:
                    print(fill('Not a recognized class; please choose again.'))
            use_completer()
            # Fill in the output parameter's JSON
            parameter_json = OrderedDict()
            parameter_json["name"] = output_name
            if output_label != '':
                parameter_json['label'] = output_label
            parameter_json["class"] = output_class
            # Fill in patterns and blank help string
            if output_class == 'file' or output_class == 'array:file':
                parameter_json["patterns"] = ["*"]
            parameter_json["help"] = ""
            app_json['outputSpec'].append(parameter_json)
    # Bucket file-typed parameter names so the code templates can generate
    # download/upload boilerplate for them.
    required_file_input_names = []
    optional_file_input_names = []
    required_file_array_input_names = []
    optional_file_array_input_names = []
    file_output_names = []
    if 'inputSpec' in app_json:
        for param in app_json['inputSpec']:
            # Optional with no default means the value may be absent at runtime.
            may_be_missing = param.get('optional') and "default" not in param
            if param['class'] == 'file':
                param_list = optional_file_input_names if may_be_missing else required_file_input_names
            elif param['class'] == 'array:file':
                param_list = optional_file_array_input_names if may_be_missing else required_file_array_input_names
            else:
                param_list = None
            if param_list is not None:
                param_list.append(param['name'])
    if 'outputSpec' in app_json:
        file_output_names = [param['name'] for param in app_json['outputSpec'] if param['class'] == 'file']
    ##################
    # TIMEOUT POLICY #
    ##################
    print('')
    print(BOLD() + 'Timeout Policy' + ENDC())
    # '*' applies the timeout to every entry point.
    app_json["runSpec"] = OrderedDict({})
    app_json['runSpec'].setdefault('timeoutPolicy', {})
    timeout, timeout_units = get_timeout(default=app_json['runSpec']['timeoutPolicy'].get('*'))
    app_json['runSpec']['timeoutPolicy'].setdefault('*', {})
    app_json['runSpec']['timeoutPolicy']['*'].setdefault(timeout_units, timeout)
    ########################
    # LANGUAGE AND PATTERN #
    ########################
    print('')
    print(BOLD() + 'Template Options' + ENDC())
    # Prompt for programming language if not specified
    language = args.language if args.language is not None else get_language()
    interpreter = language_options[language].get_interpreter()
    app_json["runSpec"]["interpreter"] = interpreter
    # Prompt the execution pattern iff the args.pattern is provided and invalid
    template_dir = os.path.join(os.path.dirname(dxpy.__file__), 'templating', 'templates', language_options[language].get_path())
    if not os.path.isdir(os.path.join(template_dir, args.template)):
        print(fill('The execution pattern "' + args.template + '" is not available for your programming language.'))
        pattern = get_pattern(template_dir)
    else:
        pattern = args.template
    template_dir = os.path.join(template_dir, pattern)
    # Merge the template's runSpec (e.g. file name, exec depends) into ours.
    with open(os.path.join(template_dir, 'dxapp.json'), 'r') as template_app_json_file:
        file_text = fill_in_name_and_ver(template_app_json_file.read(), name, version)
        template_app_json = json.loads(file_text)
        for key in template_app_json['runSpec']:
            app_json['runSpec'][key] = template_app_json['runSpec'][key]
    if (language == args.language) and (pattern == args.template):
        print('All template options are supplied in the arguments.')
    ##########################
    # APP ACCESS PERMISSIONS #
    ##########################
    print('')
    print(BOLD('Access Permissions'))
    print(fill('''If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ''' +
               BOLD('https://documentation.dnanexus.com/developer/apps/app-permissions') + '.'))
    print('')
    print(fill(UNDERLINE('Access to the Internet') + ' (other than accessing the DNAnexus API).'))
    if prompt_for_yn("Will this app need access to the Internet?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['network'] = ['*']
        print(fill('App has full access to the Internet. To narrow access to specific sites, edit the ' +
                   UNDERLINE('access.network') + ' field of dxapp.json once we generate the app.'))
    print('')
    print(fill(UNDERLINE('Direct access to the parent project') + '''. This is not needed if your app specifies outputs,
which will be copied into the project after it's done running.'''))
    if prompt_for_yn("Will this app need access to the parent project?", default=False):
        app_json.setdefault('access', {})
        app_json['access']['project'] = 'CONTRIBUTE'
        print(fill('App has CONTRIBUTE access to the parent project. To change the access level or request access to ' +
                   'other projects, edit the ' + UNDERLINE('access.project') + ' and ' + UNDERLINE('access.allProjects') +
                   ' fields of dxapp.json once we generate the app.'))
    #######################
    # SYSTEM REQUIREMENTS #
    #######################
    print('')
    print(BOLD('System Requirements'))
    print('')
    print(BOLD('Common AWS instance types:'))
    print(format_table(InstanceTypesCompleter.aws_preferred_instance_types.values(),
                       column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
    print(BOLD('Common Azure instance types:'))
    print(format_table(InstanceTypesCompleter.azure_preferred_instance_types.values(),
                       column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
    print(fill(BOLD('Default instance type:') + ' The instance type you select here will apply to all entry points in ' +
               'your app unless you override it. See ' +
               BOLD('https://documentation.dnanexus.com/developer/api/running-analyses/instance-types') + ' for more information.'))
    use_completer(InstanceTypesCompleter())
    instance_type = prompt_for_var('Choose an instance type for your app',
                                   default=InstanceTypesCompleter.default_instance_type.Name,
                                   choices=list(InstanceTypesCompleter.instance_types))
    # The chosen instance type implies the cloud, and thus the region.
    target_region = DEFAULT_REGION_AWS
    if instance_type in InstanceTypesCompleter.azure_preferred_instance_types.keys():
        target_region = DEFAULT_REGION_AZURE
    app_json['regionalOptions'] = OrderedDict({})
    app_json['regionalOptions'][target_region] = OrderedDict({})
    app_json['regionalOptions'][target_region].setdefault('systemRequirements', {})
    app_json['regionalOptions'][target_region]['systemRequirements'].setdefault('*', {})
    app_json['regionalOptions'][target_region]['systemRequirements']['*']['instanceType'] = instance_type
    ######################
    # HARDCODED DEFAULTS #
    ######################
    app_json['runSpec']['distribution'] = 'Ubuntu'
    app_json['runSpec']['release'] = '20.04'
    app_json['runSpec']['version'] = "0"
    #################
    # WRITING FILES #
    #################
    print('')
    print(BOLD() + '*** Generating ' + DNANEXUS_LOGO() + BOLD() + ' App Template... ***' + ENDC())
    with open(os.path.join(name, 'dxapp.json'), 'w') as prog_file:
        prog_file.write(clean(json.dumps(app_json, indent=2)) + '\n')
    manifest.append(os.path.join(name, 'dxapp.json'))
    print('')
    print(fill('''Your app specification has been written to the
dxapp.json file. You can specify more app options by editing this file
directly (see https://documentation.dnanexus.com/developer for complete
documentation).''' + (''' Note that without an input and output specification,
your app can only be built as an APPLET on the system. To publish it to
the DNAnexus community, you must first specify your inputs and outputs.
''' if not ('inputSpec' in app_json and 'outputSpec' in app_json) else "")))
    print('')
    # Standard app directory layout: code, tests and bundled resources.
    for subdir in 'src', 'test', 'resources':
        try:
            os.mkdir(os.path.join(name, subdir))
            manifest.append(os.path.join(name, subdir, ''))
        except:
            sys.stderr.write("Unable to create subdirectory %s/%s" % (name, subdir))
            sys.exit(1)
    # Entry points depend on the chosen execution pattern.
    entry_points = ['main']
    if pattern == 'parallelized':
        entry_points = ['main', 'process', 'postprocess']
    elif pattern == 'scatter-process-gather':
        entry_points = ['main', 'scatter', 'map', 'process', 'postprocess']
    manifest += create_files_from_templates(template_dir, app_json, language,
                                            required_file_input_names, optional_file_input_names,
                                            required_file_array_input_names, optional_file_array_input_names,
                                            file_output_names, pattern,
                                            description='<!-- Insert a description of your app here -->',
                                            entry_points=entry_points)
    print("Created files:")
    for filename in sorted(manifest):
        print("\t", filename)
    print("\n" + fill('''App directory created! See
https://documentation.dnanexus.com/developer for tutorials on how to modify these files,
or run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.'''.format(n=name)) + "\n")
    print(fill('''Running the DNAnexus build utility will create an executable on the DNAnexus platform. Any files found in the ''' +
               BOLD() + 'resources' + ENDC() +
               ''' directory will be uploaded so that they will be present in the root directory when the executable is run.'''))
if __name__ == '__main__':
    # Script entry point; main() reads the module-level parsed `args`.
    main()
| |
import os
import sys
import unittest
from forestdb import ForestDB
from forestdb import TransactionException
DB_FILE = 'forest-test.db'
class BaseTestCase(unittest.TestCase):
    """Common fixture: a fresh database file and one 'kv-1' store per test."""

    def setUp(self):
        # Start from a clean slate -- remove any leftover database file.
        if os.path.exists(DB_FILE):
            os.unlink(DB_FILE)
        self.db = self.get_db()
        self.kv = self.db['kv-1']

    def tearDown(self):
        self.db.close()
        if os.path.exists(DB_FILE):
            os.unlink(DB_FILE)

    def get_db(self):
        # Hook for subclasses to customize database construction
        # (e.g. TestDBEncryption passes an encryption key).
        return ForestDB(DB_FILE)
class TestForestDB(BaseTestCase):
    """Tests for database/KV-store lifecycle and store enumeration."""

    def test_db_open_close(self):
        # Data written before a close must survive a close/reopen cycle,
        # for both the fixture store and a second store.
        self.kv['k1'] = 'v1'
        kv2 = self.db['kv2']
        kv2['k1'] = 'v2'
        self.kv.close()
        kv2.close()
        self.db.close()
        self.db.open()
        self.kv.open()
        kv2.open()
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertEqual(kv2['k1'], 'v2')

    def test_autocommit_off(self):
        # With autocommit on (the default), the first write persists.
        self.kv['k1'] = 'v1'
        self.kv.close()
        self.db.close()
        self.db.autocommit = False
        self.db.open()
        self.kv.open()
        self.assertEqual(self.kv['k1'], 'v1')
        # With autocommit off, writes that were never committed are
        # discarded when the database is closed.
        self.kv['k1'] = 'v1-e'
        self.kv['k2'] = 'v2'
        self.kv.close()
        self.db.close()
        self.db.open()
        self.kv.open()
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertRaises(KeyError, lambda: self.kv['k2'])

    def test_kvs_names(self):
        # 'default' always exists; 'kv-1' was created by setUp().
        self.assertEqual(self.db.get_kv_names(), ['default', 'kv-1'])
        self.db.kv('kv-2')
        self.db.kv('kv-3')
        self.assertEqual(self.db.get_kv_names(),
                         ['default', 'kv-1', 'kv-2', 'kv-3'])
class TestDBEncryption(BaseTestCase):
    """Verify that an encryption_key keeps plaintext out of the raw file."""

    def get_db(self):
        # Override the fixture hook so every test in this class runs
        # against an encrypted database.
        return ForestDB(DB_FILE, encryption_key='testing')

    def test_encryption(self):
        val = 'value-testing-encryption'
        self.kv['k1'] = val
        self.assertEqual(self.kv['k1'], val)
        # The value must survive a full close/reopen cycle.
        self.kv.close()
        self.db.close()
        self.db.open()
        self.kv.open()
        self.assertEqual(self.kv['k1'], val)
        self.db.close()
        # Read the raw database file and make sure the plaintext value
        # never appears in it.  The file is opened in binary mode, so the
        # needle must be encoded first: ``str in bytes`` raises TypeError
        # on Python 3 (and encoding an ASCII str is a no-op on Python 2).
        with open(DB_FILE, 'rb') as fh:
            data = fh.read()
        self.assertNotIn(val.encode('utf-8'), data)
class TestDBInfo(BaseTestCase):
    """Tests for the database- and store-level statistics APIs."""

    def setUp(self):
        super(TestDBInfo, self).setUp()
        # Fixture: 4 sets + 1 delete on kv-1, 2 sets on kv2.
        self.kv2 = self.db.kv('kv2')
        self.kv.update(k1='v1', k2='v2', k3='v3', k4='v4')
        del self.kv['k4']
        self.kv2.update(k1='v1-2', k2='v2-2')

    def test_db_info(self):
        # Counts aggregate across all stores (default + kv-1 + kv2).
        info = self.db.info()
        self.assertEqual(info['num_kv_stores'], 3)
        self.assertEqual(info['doc_count'], 5)
        self.assertEqual(info['deleted_count'], 1)

    def test_kv_info(self):
        info = self.kv.info()
        self.assertEqual(info['deleted_count'], 1)
        self.assertEqual(info['doc_count'], 3)
        # 4 sets + 1 delete = 5 sequence numbers consumed.
        self.assertEqual(info['last_seqnum'], 5)
        info = self.kv2.info()
        self.assertEqual(info['deleted_count'], 0)
        self.assertEqual(info['doc_count'], 2)
        self.assertEqual(info['last_seqnum'], 2)

    def test_kv_ops_info(self):
        info = self.kv.ops_info()
        self.assertEqual(info['num_sets'], 4)
        self.assertEqual(info['num_dels'], 1)
        self.assertEqual(info['num_commits'], 3)
        self.assertEqual(info['num_gets'], 0)
        info = self.kv2.ops_info()
        self.assertEqual(info['num_sets'], 2)
        self.assertEqual(info['num_dels'], 0)
        self.assertEqual(info['num_commits'], 3)
        self.assertEqual(info['num_gets'], 0)
        # Both successful and failing reads count as gets.
        self.kv2['k1']
        try:
            self.kv2['kx']
        except KeyError:
            pass
        info = self.kv2.ops_info()
        self.assertEqual(info['num_sets'], 2)
        self.assertEqual(info['num_dels'], 0)
        self.assertEqual(info['num_commits'], 3)
        self.assertEqual(info['num_gets'], 2)
class TestKVOperations(BaseTestCase):
    """Exercise the basic get/set/delete, dict-style and range APIs."""

    def test_get_set_del(self):
        self.kv.set('k1', 'v1')
        self.kv.set('k2', 'v2')
        self.assertEqual(self.kv.get('k1'), 'v1')
        self.assertEqual(self.kv.get('k2'), 'v2')
        self.assertIsNone(self.kv.get('k3'))
        self.kv.delete('k1')
        self.assertIsNone(self.kv.get('k1'))
        # Can delete non-existent keys.
        self.kv.delete('k3')

    def test_dict_api(self):
        self.kv['k1'] = 'v1'
        self.kv['k2'] = 'v2'
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertEqual(self.kv['k2'], 'v2')
        self.assertRaises(KeyError, lambda: self.kv['k3'])
        del self.kv['k1']
        self.assertRaises(KeyError, lambda: self.kv['k1'])
        # Can delete non-existent keys.
        del self.kv['k3']

    def test_update(self):
        self.kv.update(k1='v1', k2='v2', k3='v3')
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertEqual(self.kv['k2'], 'v2')
        self.assertEqual(self.kv['k3'], 'v3')

    def test_empty_values(self):
        # Empty string is a valid value, distinct from a missing key.
        self.kv['k1'] = ''
        self.assertEqual(self.kv['k1'], '')

    def test_seqnum(self):
        self.kv['k1'] = 'v1'
        self.kv['k2'] = 'v2'
        seq = self.kv.last_seqnum()
        body = self.kv.get_by_seqnum(seq)
        self.assertEqual(body, 'v2')
        body = self.kv.get_by_seqnum(seq - 1)
        self.assertEqual(body, 'v1')

    def assertSlice(self, range_iter, expected):
        # Helper (not a test): materialize a range iterator and compare.
        self.assertEqual([result for result in range_iter], expected)

    def test_get_range(self):
        self.kv.update(aa='r1', bb='r2', bbb='r3', dd='r4', ee='r5', gg='r6')
        self.assertSlice(self.kv['bb':'ee'], [
            ('bb', 'r2'),
            ('bbb', 'r3'),
            ('dd', 'r4'),
            ('ee', 'r5'),
        ])
        # Bounds need not be existing keys.
        self.assertSlice(self.kv['cc':'ff'], [
            ('dd', 'r4'),
            ('ee', 'r5'),
        ])
        self.assertSlice(self.kv[:'cc'], [
            ('aa', 'r1'),
            ('bb', 'r2'),
            ('bbb', 'r3'),
        ])
        self.assertSlice(self.kv['cc':], [
            ('dd', 'r4'),
            ('ee', 'r5'),
            ('gg', 'r6'),
        ])
        self.assertSlice(self.kv[:], [
            ('aa', 'r1'),
            ('bb', 'r2'),
            ('bbb', 'r3'),
            ('dd', 'r4'),
            ('ee', 'r5'),
            ('gg', 'r6'),
        ])
        # Empty ranges and out-of-range bounds yield no results.
        self.assertSlice(self.kv['cc1':'cc2'], [])
        self.assertSlice(self.kv[:'\x01'], [])
        self.assertSlice(self.kv['\xff':], [])

    def test_get_range_reverse(self):
        self.kv.update(aa='r1', bb='r2', bbb='r3', dd='r4', ee='r5', gg='r6')
        # Reverse is implied.
        self.assertSlice(self.kv['ee':'bb'], [
            ('ee', 'r5'),
            ('dd', 'r4'),
            ('bbb', 'r3'),
            ('bb', 'r2'),
        ])
        # Reverse can also be requested via the slice "step".
        self.assertSlice(self.kv['bb':'ee':True], [
            ('ee', 'r5'),
            ('dd', 'r4'),
            ('bbb', 'r3'),
            ('bb', 'r2'),
        ])
        self.assertSlice(self.kv['ff':'cc'], [
            ('ee', 'r5'),
            ('dd', 'r4'),
        ])
        self.assertSlice(self.kv[:'cc':True], [
            ('bbb', 'r3'),
            ('bb', 'r2'),
            ('aa', 'r1'),
        ])
        self.assertSlice(self.kv['cc'::True], [
            ('gg', 'r6'),
            ('ee', 'r5'),
            ('dd', 'r4'),
        ])
        self.assertSlice(self.kv[::True], [
            ('gg', 'r6'),
            ('ee', 'r5'),
            ('dd', 'r4'),
            ('bbb', 'r3'),
            ('bb', 'r2'),
            ('aa', 'r1'),
        ])
        self.assertSlice(self.kv['cc2':'cc1'], [])
        self.assertSlice(self.kv[:'\x01':True], [])
        self.assertSlice(self.kv['\xff'::True], [])

    def test_keys_values_iterators(self):
        # BUG FIX: this method was named ``keys_values_iterators`` (no
        # ``test_`` prefix), so unittest discovery silently never ran it.
        K = self.kv
        K.update(aa='r1', bb='r2', dd='r3', ee='r4')
        self.assertEqual(list(K.keys()), ['aa', 'bb', 'dd', 'ee'])
        self.assertEqual(list(K.keys(reverse=True)),
                         ['ee', 'dd', 'bb', 'aa'])
        self.assertEqual(list(K.keys(start='aa2')), ['bb', 'dd', 'ee'])
        self.assertEqual(list(K.keys(start='cc', reverse=True)),
                         ['bb', 'aa'])
        self.assertEqual(list(K.keys(start='\xff')), [])
        self.assertEqual(list(K.values()), ['r1', 'r2', 'r3', 'r4'])
        self.assertEqual(list(K.values(reverse=True)),
                         ['r4', 'r3', 'r2', 'r1'])
        self.assertEqual(list(K.values(start='aa2')), ['r2', 'r3', 'r4'])
        self.assertEqual(list(K.values(start='cc', reverse=True)),
                         ['r2', 'r1'])
        self.assertEqual(list(K.keys(start='\x01', reverse=True)), [])

    def test_delete_range(self):
        for i in range(1, 10):
            self.kv['k%s' % i] = 'v%s' % i
        # Range delete removes every key in the (inclusive-bound) span.
        del self.kv['k2':'k55']
        self.assertEqual([key for key in self.kv.keys()], [
            'k1', 'k6', 'k7', 'k8', 'k9'])
class TestDocument(BaseTestCase):
    """Tests for the lower-level Document object API."""

    def test_document_properties(self):
        doc1 = self.kv.document('k1', 'm1', 'v1')
        doc2 = self.kv.document('k2')
        doc3 = self.kv.document(_create=False)
        self.assertEqual(doc1.key, 'k1')
        self.assertEqual(doc1.meta, 'm1')
        self.assertEqual(doc1.body, 'v1')
        # Meta/body default to empty strings when only a key is given.
        self.assertEqual(doc2.key, 'k2')
        self.assertEqual(doc2.meta, '')
        self.assertEqual(doc2.body, '')
        # An uncreated document has no backing struct; accessors raise.
        self.assertRaises(ValueError, lambda: doc3.key)
        self.assertRaises(ValueError, lambda: doc3.meta)
        self.assertRaises(ValueError, lambda: doc3.body)

    def test_document_get_set_del(self):
        doc1 = self.kv.document('k1', 'm1', 'v1')
        doc2 = self.kv.document('k2', 'm2', 'v2')
        doc1.insert()
        doc2.insert()
        # Seqnum is populated.
        self.assertEqual(doc1.seqnum, 1)
        self.assertEqual(doc2.seqnum, 2)
        # Data is stored.
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertEqual(self.kv['k2'], 'v2')
        # Empty doc created with key initialized.
        doc1_db = self.kv.document('k1')
        self.assertEqual(doc1_db.meta, '')
        self.assertEqual(doc1_db.body, '')
        # Retrieve from db, fields are populated.
        doc1_db.get()
        self.assertEqual(doc1_db.seqnum, 1)
        self.assertEqual(doc1_db.meta, 'm1')
        self.assertEqual(doc1_db.body, 'v1')
        # Retrieve only metadata.
        doc2_db = self.kv.document('k2')
        doc2_db.get_metadata()
        self.assertEqual(doc2_db.seqnum, 2)
        self.assertEqual(doc2_db.meta, 'm2')
        # Can load by seqnum.
        doc1_seq = self.kv.document(seqnum=1)
        doc1_seq.get_by_seqnum()
        self.assertEqual(doc1_seq.seqnum, 1)
        self.assertEqual(doc1_seq.key, 'k1')
        self.assertEqual(doc1_seq.meta, 'm1')
        self.assertEqual(doc1_seq.body, 'v1')
        # Can load metadata by seqnum.
        doc2_seq = self.kv.document(seqnum=2)
        doc2_seq.get_metadata_by_seqnum()
        self.assertEqual(doc2_seq.seqnum, 2)
        self.assertEqual(doc2_seq.key, 'k2')
        self.assertEqual(doc2_seq.meta, 'm2')
        # Delete works.
        doc1_db.delete()
        self.assertRaises(KeyError, lambda: self.kv['k1'])

    def test_missing_docs(self):
        # Attempt to get missing key.
        d = self.kv.document('k1')
        self.assertRaises(KeyError, d.get)
        # Attempt to get missing seqnum.
        d = self.kv.document(seqnum=10)
        self.assertRaises(KeyError, d.get_by_seqnum)

    def test_insert_missing(self):
        # A document without a key cannot be inserted.
        d = self.kv.document()
        self.assertRaises(Exception, d.insert)
class TestTransaction(BaseTestCase):
    """Tests for the transaction context manager: commit and rollback."""

    def test_transaction(self):
        # Writes inside the context are visible after it exits.
        with self.db.transaction():
            self.kv['k1'] = 'v1'
            self.kv['k2'] = 'v2'
            self.kv['k3'] = 'v3'
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertEqual(self.kv['k2'], 'v2')
        with self.db.transaction() as txn:
            self.kv['k1'] = 'v1-e'
            self.kv['k2'] = 'v2-e'
        self.assertEqual(self.kv['k1'], 'v1-e')
        self.assertEqual(self.kv['k2'], 'v2-e')
        with self.db.transaction() as txn:
            del self.kv['k2']
            # Deletes are visible inside the transaction too.
            self.assertRaises(KeyError, lambda: self.kv['k2'])
        self.assertRaises(KeyError, lambda: self.kv['k2'])

    def test_rollback(self):
        self.kv['k1'] = 'v1'
        self.kv['k2'] = 'v2'
        self.kv['k3'] = 'v3'
        with self.db.transaction() as txn:
            self.kv['k1'] = 'v1-e'
            self.kv['k2'] = 'v2-e'
            del self.kv['k3']
            # Rolling back discards every change made in the transaction.
            txn.rollback()
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertEqual(self.kv['k2'], 'v2')
        self.assertEqual(self.kv['k3'], 'v3')

    def test_commit_rollback(self):
        with self.db.transaction() as txn:
            self.kv['k1'] = 'v1'
            self.kv['k2'] = 'v2'
            # An explicit commit creates a savepoint; a later rollback
            # only undoes changes made after it.
            txn.commit()
            self.kv['k2'] = 'v2-e'
            txn.rollback()
        self.assertEqual(self.kv['k1'], 'v1')
        self.assertEqual(self.kv['k2'], 'v2')
        with self.db.transaction() as txn:
            self.kv['k1'] = 'v1-e'
            self.kv['k2'] = 'v2-e'
            txn.rollback()
            # Writes made after the rollback still take effect.
            self.kv['k1'] = 'v1-e2'
        self.assertEqual(self.kv['k1'], 'v1-e2')
        self.assertEqual(self.kv['k2'], 'v2')
class TestCursor(BaseTestCase):
    """Tests for explicit cursor iteration with start/stop bounds."""

    def setUp(self):
        super(TestCursor, self).setUp()
        # Keys vary in length so prefix and between-key boundary
        # conditions are exercised.
        self.test_data = [
            ('aa', 'r1'),
            ('bb', 'r2'),
            ('bbb', 'r3'),
            ('dd', 'r4'),
            ('ee', 'r5'),
            ('gg', 'r6'),
            ('zz', 'r7'),
        ]
        for key, value in self.test_data:
            self.kv[key] = value

    def test_simple_iteration(self):
        # Iterating the store directly yields document objects in key order.
        documents = [row for row in self.kv]
        data = [(doc.key, doc.body) for doc in documents]
        self.assertEqual(self.test_data, data)

    def test_multi_iterations(self):
        # A cursor can be consumed more than once.
        cursor = self.kv.cursor()
        data = [(doc.key, doc.body) for doc in cursor]
        self.assertEqual(self.test_data, data)
        data = [(doc.key, doc.body) for doc in cursor]
        self.assertEqual(self.test_data, data)

    def assertRange(self, cursor, expected):
        # Helper (not a test): compare cursor output to (key, body) pairs.
        data = [(doc.key, doc.body) for doc in cursor]
        self.assertEqual(data, expected)

    def test_cursor_range_start(self):
        cursor = self.kv.cursor(start='dd')  # Key exists.
        self.assertRange(cursor, [
            ('dd', 'r4'),
            ('ee', 'r5'),
            ('gg', 'r6'),
            ('zz', 'r7')])
        cursor = self.kv.cursor(start='dd', skip_start=True)
        self.assertRange(cursor, [
            ('ee', 'r5'),
            ('gg', 'r6'),
            ('zz', 'r7')])
        cursor = self.kv.cursor(start='de')  # Key does not exist.
        self.assertRange(cursor, [
            ('ee', 'r5'),
            ('gg', 'r6'),
            ('zz', 'r7')])
        cursor = self.kv.cursor(start='de', skip_start=True)
        self.assertRange(cursor, [
            ('ee', 'r5'),  # No effect since not exact match.
            ('gg', 'r6'),
            ('zz', 'r7')])
        cursor = self.kv.cursor(start='\x01')  # Key below first record.
        self.assertRange(cursor, self.test_data)
        cursor = self.kv.cursor(start='\x01', skip_start=True)
        self.assertRange(cursor, self.test_data)
        cursor = self.kv.cursor(start='\xff')  # Key after last record.
        self.assertRange(cursor, [])

    def test_cursor_range_stop(self):
        cursor = self.kv.cursor(stop='dd')  # Key exists.
        self.assertRange(cursor, [
            ('aa', 'r1'),
            ('bb', 'r2'),
            ('bbb', 'r3'),
            ('dd', 'r4')])
        cursor = self.kv.cursor(stop='dd', skip_stop=True)
        self.assertRange(cursor, [
            ('aa', 'r1'),
            ('bb', 'r2'),
            ('bbb', 'r3')])
        cursor = self.kv.cursor(stop='cc')  # Key does not exist.
        self.assertRange(cursor, [
            ('aa', 'r1'),
            ('bb', 'r2'),
            ('bbb', 'r3')])
        cursor = self.kv.cursor(stop='cc', skip_stop=True)
        self.assertRange(cursor, [
            ('aa', 'r1'),
            ('bb', 'r2'),
            ('bbb', 'r3')])
        cursor = self.kv.cursor(stop='\x01')  # Key below first record.
        self.assertRange(cursor, [])
        cursor = self.kv.cursor(stop='\xff')  # Key after last record.
        self.assertRange(cursor, self.test_data)
        cursor = self.kv.cursor(stop='\xff', skip_stop=True)
        self.assertRange(cursor, self.test_data)

    def test_start_stop(self):
        cursor = self.kv.cursor(start='bb', stop='ee')  # Both exist.
        self.assertRange(cursor, [
            ('bb', 'r2'),
            ('bbb', 'r3'),
            ('dd', 'r4'),
            ('ee', 'r5')])
        cursor = self.kv.cursor(start='cc', stop='ff')  # Neither exist.
        self.assertRange(cursor, [
            ('dd', 'r4'),
            ('ee', 'r5')])
        cursor = self.kv.cursor(start='\x01', stop='\x02')  # Below.
        self.assertRange(cursor, [])
        cursor = self.kv.cursor(start='\xfe', stop='\xff')  # Above.
        self.assertRange(cursor, [])
        cursor = self.kv.cursor(start='aa', stop='aa')  # Same, exists.
        self.assertRange(cursor, [('aa', 'r1')])
        cursor = self.kv.cursor(start='cc', stop='cc')  # Same, not exists.
        self.assertRange(cursor, [])

    def test_skip_start_stop(self):
        cursor = self.kv.cursor('dd', 'gg', True, True)  # Both exist.
        self.assertRange(cursor, [('ee', 'r5')])
        cursor = self.kv.cursor('dc', 'fg', True, True)  # Neither exist.
        self.assertRange(cursor, [('dd', 'r4'), ('ee', 'r5')])
        # Skipping either bound of a single-key range empties it.
        cursor = self.kv.cursor(start='aa', stop='aa', skip_start=True)
        self.assertRange(cursor, [])
        cursor = self.kv.cursor(start='aa', stop='aa', skip_stop=True)
        self.assertRange(cursor, [])

    def test_start_gt_stop(self):
        # Unlike slice access, an inverted range here is simply empty.
        cursor = self.kv.cursor(start='dd', stop='aa')
        self.assertRange(cursor, [])

    def test_reverse(self):
        # start/stop keep their meaning; only iteration order flips.
        cursor = self.kv.cursor(start='aa', stop='dd', reverse=True)
        self.assertRange(cursor, [
            ('dd', 'r4'),
            ('bbb', 'r3'),
            ('bb', 'r2'),
            ('aa', 'r1')])
        cursor = self.kv.cursor(start='bc', stop='kk', reverse=True)
        self.assertRange(cursor, [
            ('gg', 'r6'),
            ('ee', 'r5'),
            ('dd', 'r4')])
        cursor = self.kv.cursor(start='cc', reverse=True)
        self.assertRange(cursor, [
            ('zz', 'r7'),
            ('gg', 'r6'),
            ('ee', 'r5'),
            ('dd', 'r4')])
        cursor = self.kv.cursor(stop='cc', reverse=True)
        self.assertRange(cursor, [
            ('bbb', 'r3'),
            ('bb', 'r2'),
            ('aa', 'r1')])
        cursor = self.kv.cursor(reverse=True)
        self.assertRange(cursor, list(reversed(self.test_data)))
class TestSnapshots(BaseTestCase):
    """Tests for point-in-time snapshots: isolation from later writes."""

    def test_snapshot(self):
        self.kv.update(k1='v1', k2='v2', k3='v3')
        snap = self.kv.snapshot()
        self.assertEqual(snap['k1'], 'v1')
        self.assertEqual(snap['k2'], 'v2')
        self.assertEqual(snap['k3'], 'v3')
        # Mutate the live store; the snapshot must not change.
        self.kv['k1'] = 'v1-e'
        self.kv['k2'] = 'v2-e'
        del self.kv['k3']
        self.assertEqual(snap['k1'], 'v1')
        self.assertEqual(snap['k2'], 'v2')
        self.assertEqual(snap['k3'], 'v3')
        self.assertEqual(self.kv['k1'], 'v1-e')
        self.assertEqual(self.kv['k2'], 'v2-e')
        self.assertFalse('k3' in self.kv)
        # A second snapshot captures the current (mutated) state, while
        # the first one still reflects the original state.
        snap2 = self.kv.snapshot()
        self.kv['k1'] = 'v1-e2'
        self.kv['k3'] = 'v3-e2'
        self.assertEqual(snap['k1'], 'v1')
        self.assertEqual(snap['k2'], 'v2')
        self.assertEqual(snap['k3'], 'v3')
        self.assertEqual(snap2['k1'], 'v1-e')
        self.assertEqual(snap2['k2'], 'v2-e')
        self.assertFalse('k3' in snap2)
        self.assertEqual(self.kv['k1'], 'v1-e2')
        self.assertEqual(self.kv['k2'], 'v2-e')
        self.assertEqual(self.kv['k3'], 'v3-e2')
        # Snapshots are iterable like regular stores.
        self.assertEqual([doc.body for doc in snap], ['v1', 'v2', 'v3'])
        self.assertEqual([doc.body for doc in snap2], ['v1-e', 'v2-e'])
# Allow the suite to be run directly: ``python <this file>``.
if __name__ == '__main__':
    unittest.main(argv=sys.argv)
| |
#
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from novaclient import client as base_client
from novaclient import exceptions as nova_exceptions
import requests
from six.moves.urllib import parse as urlparse
from heat.tests import fakes
# Nova API microversion the fakes emulate.
NOVA_API_VERSION = "2.1"
# Resolve the concrete versioned novaclient class; FakeClient subclasses it
# so it exposes the same manager attributes as the real client.
Client = base_client.Client(NOVA_API_VERSION).__class__
def fake_exception(status_code=404, message=None, details=None):
    """Build a novaclient exception as if it came from a real HTTP response.

    Defaults to a 404 so tests can simulate missing resources.
    """
    response = mock.Mock()
    response.status_code = status_code
    response.headers = None
    error_body = {'error': {'message': message, 'details': details}}
    return nova_exceptions.from_response(response, error_body, None)
class FakeClient(fakes.FakeClient, Client):
    """novaclient Client whose transport is replaced by FakeSessionClient."""

    def __init__(self, *args, **kwargs):
        # direct_use=False: novaclient refuses direct instantiation of the
        # versioned client class unless this flag is cleared.
        super(FakeClient, self).__init__(direct_use=False)
        self.client = FakeSessionClient(session=mock.Mock(), **kwargs)
class FakeSessionClient(base_client.SessionClient):
    """Session client that dispatches HTTP calls to local fake methods.

    A request such as ``GET /servers/detail`` is munged into the method
    name ``get_servers_detail``; that method returns ``(status, body)``
    which is wrapped in a ``requests`` Response.  Every call is recorded
    in ``self.callstack`` so tests can assert on the request sequence.
    """

    def __init__(self, *args, **kwargs):
        super(FakeSessionClient, self).__init__(*args, **kwargs)
        # (method, url, body) tuples, in call order.
        self.callstack = []

    def request(self, url, method, **kwargs):
        # Check that certain things are called correctly
        if method in ['GET', 'DELETE']:
            assert 'body' not in kwargs
        elif method == 'PUT':
            assert 'body' in kwargs
        # Call the method
        args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
        kwargs.update(args)
        # Munge "GET /servers/1234?x=y" into the name "get_servers_1234".
        munged_url = url.rsplit('?', 1)[0]
        munged_url = munged_url.strip('/').replace('/', '_').replace(
            '.', '_').replace(' ', '_')
        munged_url = munged_url.replace('-', '_')
        callback = "%s_%s" % (method.lower(), munged_url)
        if not hasattr(self, callback):
            raise AssertionError('Called unknown API method: %s %s, '
                                 'expected fakes method name: %s' %
                                 (method, url, callback))
        # Note the call
        self.callstack.append((method, url, kwargs.get('body')))
        status, body = getattr(self, callback)(**kwargs)
        response = requests.models.Response()
        if isinstance(status, dict):
            # A dict status carries extra response headers (e.g. 'location')
            # alongside the numeric 'status' entry.
            response.status_code = status.pop("status")
            response.headers = status
        else:
            response.status_code = status
        return response, body

    #
    # Servers
    #
    def get_servers_detail(self, **kw):
        # Paginated listing: a marker of '56789' means "past the last page".
        if kw.get('marker') == '56789':
            return (200, {"servers": []})
        return (
            200,
            {"servers": [{"id": "1234",
                          "name": "sample-server",
                          "OS-EXT-SRV-ATTR:instance_name":
                              "sample-server",
                          "image": {"id": 2, "name": "sample image"},
                          "flavor": {"id": 1, "name": "256 MB Server"},
                          "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
                          "status": "BUILD",
                          "progress": 60,
                          "addresses": {"public": [{"version": 4,
                                                    "addr": "1.2.3.4"},
                                                   {"version": 4,
                                                    "addr": "5.6.7.8"}],
                                        "private": [{"version": 4,
                                                     "addr": "10.11.12.13"}]},
                          "accessIPv4": "",
                          "accessIPv6": "",
                          "metadata": {"Server Label": "Web Head 1",
                                       "Image Version": "2.1"}},
                         {"id": "5678",
                          "name": "sample-server2",
                          "OS-EXT-AZ:availability_zone": "nova2",
                          "OS-EXT-SRV-ATTR:instance_name":
                              "sample-server2",
                          "image": {"id": 2, "name": "sample image"},
                          "flavor": {"id": 1, "name": "256 MB Server"},
                          "hostId": "9e107d9d372bb6826bd81d3542a419d6",
                          "status": "ACTIVE",
                          "accessIPv4": "192.0.2.0",
                          "accessIPv6": "::babe:4317:0A83",
                          "addresses": {"public": [{"version": 4,
                                                    "addr": "4.5.6.7",
                                                    "OS-EXT-IPS-MAC:mac_addr":
                                                        "fa:16:3e:8c:22:aa"},
                                                   {"version": 4,
                                                    "addr": "5.6.9.8",
                                                    "OS-EXT-IPS-MAC:mac_addr":
                                                        "fa:16:3e:8c:33:bb"}],
                                        "private": [{"version": 4,
                                                     "addr": "10.13.12.13",
                                                     "OS-EXT-IPS-MAC:mac_addr":
                                                         "fa:16:3e:8c:44:cc"}]},
                          "metadata": {}},
                         {"id": "9101",
                          "name": "hard-reboot",
                          "OS-EXT-SRV-ATTR:instance_name":
                              "hard-reboot",
                          "image": {"id": 2, "name": "sample image"},
                          "flavor": {"id": 1, "name": "256 MB Server"},
                          "hostId": "9e44d8d435c43dd8d96bb63ed995605f",
                          "status": "HARD_REBOOT",
                          "accessIPv4": "",
                          "accessIPv6": "",
                          "addresses": {"public": [{"version": 4,
                                                    "addr": "172.17.1.2"},
                                                   {"version": 4,
                                                    "addr": "10.20.30.40"}],
                                        "private": [{"version": 4,
                                                     "addr": "10.13.12.13"}]},
                          "metadata": {"Server Label": "DB 1"}},
                         {"id": "9102",
                          "name": "server-with-no-ip",
                          "OS-EXT-SRV-ATTR:instance_name":
                              "server-with-no-ip",
                          "image": {"id": 2, "name": "sample image"},
                          "flavor": {"id": 1, "name": "256 MB Server"},
                          "hostId": "c1365ba78c624df9b2ff446515a682f5",
                          "status": "ACTIVE",
                          "accessIPv4": "",
                          "accessIPv6": "",
                          "addresses": {"empty_net": []},
                          "metadata": {"Server Label": "DB 1"}},
                         {"id": "9999",
                          "name": "sample-server3",
                          "OS-EXT-SRV-ATTR:instance_name":
                              "sample-server3",
                          "OS-EXT-AZ:availability_zone": "nova3",
                          "image": {"id": 3, "name": "sample image"},
                          "flavor": {"id": 3, "name": "m1.large"},
                          "hostId": "9e107d9d372bb6826bd81d3542a419d6",
                          "status": "ACTIVE",
                          "accessIPv4": "",
                          "accessIPv6": "",
                          "addresses": {
                              "public": [{"version": 4, "addr": "4.5.6.7"},
                                         {"version": 4, "addr": "5.6.9.8"}],
                              "private": [{"version": 4,
                                           "addr": "10.13.12.13"}]},
                          "metadata": {"Server Label": "DB 1"},
                          "os-extended-volumes:volumes_attached":
                              [{"id":
                                "66359157-dace-43ab-a7ed-a7e7cd7be59d"}]},
                         {"id": 56789,
                          "name": "server-with-metadata",
                          "OS-EXT-SRV-ATTR:instance_name":
                              "sample-server2",
                          "image": {"id": 2, "name": "sample image"},
                          "flavor": {"id": 1, "name": "256 MB Server"},
                          "hostId": "9e107d9d372bb6826bd81d3542a419d6",
                          "status": "ACTIVE",
                          "accessIPv4": "192.0.2.0",
                          "accessIPv6": "::babe:4317:0A83",
                          "addresses": {"public": [{"version": 4,
                                                    "addr": "4.5.6.7"},
                                                   {"version": 4,
                                                    "addr": "5.6.9.8"}],
                                        "private": [{"version": 4,
                                                     "addr": "10.13.12.13"}]},
                          "metadata": {'test': '123', 'this': 'that'}}]})

    # The per-id getters below just slice entries out of the listing above,
    # so the detail payload stays the single source of truth.
    def get_servers_1234(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][0]}
        return (200, r)

    def get_servers_56789(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][5]}
        return (200, r)

    def get_servers_WikiServerOne(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][0]}
        return (200, r)

    def get_servers_WikiServerOne1(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][0]}
        return (200, r)

    def get_servers_WikiServerOne2(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][3]}
        return (200, r)

    def get_servers_5678(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][1]}
        return (200, r)

    def delete_servers_1234(self, **kw):
        return (202, None)

    def get_servers_9999(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][4]}
        return (200, r)

    def get_servers_9102(self, **kw):
        r = {'server': self.get_servers_detail()[1]['servers'][3]}
        return (200, r)

    #
    # Server actions
    #
    def post_servers_1234_action(self, body, **kw):
        # Validates the request body shape for each supported action and
        # returns the canned (status, body) the real API would produce.
        _body = None
        resp = 202
        assert len(body.keys()) == 1
        action = next(iter(body))
        if action == 'reboot':
            assert list(body[action].keys()) == ['type']
            assert body[action]['type'] in ['HARD', 'SOFT']
        elif action == 'rebuild':
            keys = list(body[action].keys())
            if 'adminPass' in keys:
                keys.remove('adminPass')
            assert keys == ['imageRef']
            _body = self.get_servers_1234()[1]
        elif action == 'resize':
            assert list(body[action].keys()) == ['flavorRef']
        elif action == 'confirmResize':
            assert body[action] is None
            # This one method returns a different response code
            return (204, None)
        elif action in ['revertResize',
                        'migrate',
                        'rescue', 'unrescue',
                        'suspend', 'resume',
                        'lock', 'unlock',
                        ]:
            assert body[action] is None
        elif action == 'addFixedIp':
            assert list(body[action].keys()) == ['networkId']
        elif action in ['removeFixedIp',
                        'addFloatingIp',
                        'removeFloatingIp',
                        ]:
            assert list(body[action].keys()) == ['address']
        elif action == 'createImage':
            assert set(body[action].keys()) == set(['name', 'metadata'])
            # Dict status: carries the Location header of the new image.
            resp = {"status": 202,
                    "location": "http://blah/images/456"}
        elif action == 'changePassword':
            assert list(body[action].keys()) == ['adminPass']
        elif action == 'os-getConsoleOutput':
            assert list(body[action].keys()) == ['length']
            return (202, {'output': 'foo'})
        elif action == 'os-getVNCConsole':
            assert list(body[action].keys()) == ['type']
        elif action == 'os-migrateLive':
            assert set(body[action].keys()) == set(['host',
                                                    'block_migration',
                                                    'disk_over_commit'])
        elif action == 'forceDelete':
            assert body is not None
        else:
            raise AssertionError("Unexpected server action: %s" % action)
        return (resp, _body)

    def post_servers_5678_action(self, body, **kw):
        _body = None
        resp = 202
        assert len(body.keys()) == 1
        action = next(iter(body))
        if action in ['addFloatingIp',
                      'removeFloatingIp',
                      ]:
            assert list(body[action].keys()) == ['address']
        return (resp, _body)

    #
    # Flavors
    #
    def get_flavors(self, **kw):
        return (200, {'flavors': [
            {'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
             'OS-FLV-EXT-DATA:ephemeral': 10},
            {'id': 2, 'name': 'm1.small', 'ram': 512, 'disk': 20,
             'OS-FLV-EXT-DATA:ephemeral': 20},
            {'id': 3, 'name': 'm1.large', 'ram': 512, 'disk': 20,
             'OS-FLV-EXT-DATA:ephemeral': 30}
        ]})

    # Lookups by name 404, forcing clients to resolve flavors by id.
    def get_flavors_256_MB_Server(self, **kw):
        raise fake_exception()

    def get_flavors_m1_small(self, **kw):
        raise fake_exception()

    def get_flavors_m1_large(self, **kw):
        raise fake_exception()

    def get_flavors_1(self, **kw):
        return (200, {'flavor': {
            'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
            'OS-FLV-EXT-DATA:ephemeral': 10}})

    def get_flavors_2(self, **kw):
        return (200, {'flavor': {
            'id': 2, 'name': 'm1.small', 'ram': 512, 'disk': 20,
            'OS-FLV-EXT-DATA:ephemeral': 20}})

    def get_flavors_3(self, **kw):
        return (200, {'flavor': {
            'id': 3, 'name': 'm1.large', 'ram': 512, 'disk': 20,
            'OS-FLV-EXT-DATA:ephemeral': 30}})

    #
    # Floating ips
    #
    def get_os_floating_ips_1(self, **kw):
        return (200, {'floating_ip': {'id': 1,
                                      'fixed_ip': '10.0.0.1',
                                      'ip': '11.0.0.1'}})

    def post_os_floating_ips(self, body, **kw):
        return (202, self.get_os_floating_ips_1()[1])

    def delete_os_floating_ips_1(self, **kw):
        return (204, None)

    #
    # Images
    #
    def get_images_detail(self, **kw):
        return (200, {'images': [{'id': 1,
                                  'name': 'CentOS 5.2',
                                  "updated": "2010-10-10T12:00:00Z",
                                  "created": "2010-08-10T12:00:00Z",
                                  "status": "ACTIVE",
                                  "metadata": {"test_key": "test_value"},
                                  "links": {}},
                                 {"id": 743,
                                  "name": "My Server Backup",
                                  "serverId": 1234,
                                  "updated": "2010-10-10T12:00:00Z",
                                  "created": "2010-08-10T12:00:00Z",
                                  "status": "SAVING",
                                  "progress": 80,
                                  "links": {}},
                                 {"id": 744,
                                  "name": "F17-x86_64-gold",
                                  "serverId": 9999,
                                  "updated": "2010-10-10T12:00:00Z",
                                  "created": "2010-08-10T12:00:00Z",
                                  "status": "SAVING",
                                  "progress": 80,
                                  "links": {}},
                                 {"id": 745,
                                  "name": "F17-x86_64-cfntools",
                                  "serverId": 9998,
                                  "updated": "2010-10-10T12:00:00Z",
                                  "created": "2010-08-10T12:00:00Z",
                                  "status": "SAVING",
                                  "progress": 80,
                                  "links": {}},
                                 {"id": 746,
                                  "name": "F20-x86_64-cfntools",
                                  "serverId": 9998,
                                  "updated": "2010-10-10T12:00:00Z",
                                  "created": "2010-08-10T12:00:00Z",
                                  "status": "SAVING",
                                  "progress": 80,
                                  "links": {}}]})

    def get_images_1(self, **kw):
        return (200, {'image': self.get_images_detail()[1]['images'][0]})

    # Aliases: id 456 and the literal name both resolve to image 1.
    get_images_456 = get_images_1
    get_images_image_name = get_images_1

    #
    # Keypairs
    #
    def get_os_keypairs(self, *kw):
        return (200, {"keypairs": [{'fingerprint': 'FAKE_KEYPAIR',
                                    'name': 'test',
                                    'public_key': 'foo'}]})

    def get_os_keypairs_test(self, *kw):
        return (200, {"keypair": {'fingerprint': 'FAKE_KEYPAIR',
                                  'name': 'test',
                                  'public_key': 'foo'}})

    def get_os_keypairs_test2(self, *kw):
        # Unknown keypair -> 404.
        raise fake_exception()

    def get_os_availability_zone(self, *kw):
        return (200, {"availabilityZoneInfo": [{'zoneName': 'nova1'}]})

    def get_os_networks(self, **kw):
        # 'foo' is deliberately duplicated to exercise ambiguous-name
        # handling in callers.
        return (200, {'networks':
                      [{'label': 'public',
                        'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'},
                       {'label': 'foo',
                        'id': '42'},
                       {'label': 'foo',
                        'id': '42'}]})

    #
    # Limits
    #
    def get_limits(self, *kw):
        return (200, {'limits': {'absolute': {'maxServerMeta': 3,
                                              'maxPersonalitySize': 10240,
                                              'maxPersonality': 5}}})
| |
#! python
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
# compatibility for older Python < 2.6
# Compatibility shim: Python < 2.6 lacks the ``bytes``/``bytearray``
# builtins, so emulate them on top of str/list.
try:
    bytes
    bytearray
except (NameError, AttributeError):
    # Python older than 2.6 do not have these types. Like for Python 2.6 they
    # should behave like str. For Python older than 3.0 we want to work with
    # strings anyway, only later versions have a true bytes type.
    bytes = str

    # bytearray is a mutable type that is easily turned into an instance of
    # bytes
    class bytearray(list):
        # for bytes(bytearray()) usage
        def __str__(self): return ''.join(self)
        def __repr__(self): return 'bytearray(%r)' % ''.join(self)

        # append automatically converts integers to characters
        def append(self, item):
            if isinstance(item, str):
                list.append(self, item)
            else:
                list.append(self, chr(item))

        # +=
        def __iadd__(self, other):
            for byte in other:
                self.append(byte)
            return self

        def __getslice__(self, i, j):
            return bytearray(list.__getslice__(self, i, j))

        # Indexing returns ints (slicing returns bytearray), mirroring the
        # behavior of the real bytearray type.
        def __getitem__(self, item):
            if isinstance(item, slice):
                return bytearray(list.__getitem__(self, item))
            else:
                return ord(list.__getitem__(self, item))

        def __eq__(self, other):
            if isinstance(other, str):
                other = bytearray(other)
            return list.__eq__(self, other)
# ``memoryview`` was introduced in Python 2.7 and ``bytes(some_memoryview)``
# isn't returning the contents (very unfortunate). Therefore we need special
# cases and test for it. Ensure that there is a ``memoryview`` object for older
# Python versions. This is easier than making every test dependent on its
# existence.
# Ensure the name ``memoryview`` exists on Python < 2.7 so that the
# isinstance() test in to_bytes() below never raises NameError.
try:
    memoryview
except (NameError, AttributeError):
    # implementation does not matter as we do not really use it.
    # it just must not inherit from something else we might care for.
    class memoryview:
        pass
# all Python versions prior 3.x convert ``str([17])`` to '[17]' instead of '\x11'
# so a simple ``bytes(sequence)`` doesn't work for all versions
def to_bytes(seq):
    """convert a sequence to a bytes type

    Accepts bytes/bytearray/memoryview unchanged (or trivially converted)
    and otherwise treats the argument as an iterable of byte values, where
    each element may be an int or a single character string.
    """
    if isinstance(seq, bytes):
        return seq
    if isinstance(seq, bytearray):
        return bytes(seq)
    if isinstance(seq, memoryview):
        return seq.tobytes()
    buf = bytearray()
    for element in seq:
        # Accept single-character strings as well as raw integer values;
        # this keeps the py2 emulation and py3 iteration paths consistent.
        if isinstance(element, str):
            element = ord(element)
        buf.append(element)
    return bytes(buf)
# create control bytes
XON = to_bytes([17])   # DC1: resume transmission (software flow control)
XOFF = to_bytes([19])  # DC3: pause transmission (software flow control)
CR = to_bytes([13])
LF = to_bytes([10])

# Valid values for the parity, stop-bit and byte-size port settings.
PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO = (1, 1.5, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)

# Human-readable names for the parity constants.
PARITY_NAMES = {
    PARITY_NONE: 'None',
    PARITY_EVEN: 'Even',
    PARITY_ODD: 'Odd',
    PARITY_MARK: 'Mark',
    PARITY_SPACE: 'Space',
}
class SerialException(IOError):
    """Base class for serial port related exceptions."""


class SerialTimeoutException(SerialException):
    """Write timeouts give an exception"""


# Shared singleton exception instances raised by the port implementations.
writeTimeoutError = SerialTimeoutException('Write timeout')
portNotOpenError = SerialException('Attempting to use a port that is not open')
class FileLike(object):
    """An abstract file like class.

    This class implements readline and readlines based on read and
    writelines based on write.
    This class is used to provide the above functions for to Serial
    port objects.

    Note that when the serial port was opened with _NO_ timeout that
    readline blocks until it sees a newline (or the specified size is
    reached) and that readlines would never return and therefore
    refuses to work (it raises an exception in this case)!
    """

    def __init__(self):
        self.closed = True

    def close(self):
        self.closed = True

    # so that ports are closed when objects are discarded
    def __del__(self):
        """Destructor.  Calls close()."""
        # The try/except block is in case this is called at program
        # exit time, when it's possible that globals have already been
        # deleted, and then the close() call might fail.  Since
        # there's nothing we can do about such failures and they annoy
        # the end users, we suppress the traceback.
        try:
            self.close()
        except:
            pass

    def writelines(self, sequence):
        # Write each element in turn; relies on the subclass write().
        for line in sequence:
            self.write(line)

    def flush(self):
        """flush of file like objects"""
        pass

    # iterator for e.g. "for line in Serial(0): ..." usage
    def __next__(self):
        line = self.readline()
        if not line: raise StopIteration
        return line

    def __iter__(self):
        return self

    def readline(self, size=None, eol=LF):
        """read a line which is terminated with end-of-line (eol) character
        ('\n' by default) or until timeout."""
        leneol = len(eol)
        line = bytearray()
        while True:
            # Read one byte at a time; an empty read means timeout/EOF.
            c = self.read(1)
            if c:
                line += c
                if line[-leneol:] == eol:
                    break
                if size is not None and len(line) >= size:
                    break
            else:
                break
        return bytes(line)

    def readlines(self, sizehint=None, eol=LF):
        """read a list of lines, until timeout.
        sizehint is ignored."""
        # Without a timeout this would never return -- refuse to run.
        if self.timeout is None:
            raise ValueError("Serial port MUST have enabled timeout for this function!")
        leneol = len(eol)
        lines = []
        while True:
            line = self.readline(eol=eol)
            if line:
                lines.append(line)
                if line[-leneol:] != eol:    # was the line received with a timeout?
                    break
            else:
                break
        return lines

    def xreadlines(self, sizehint=None):
        """Read lines, implemented as generator. It will raise StopIteration on
        timeout (empty read). sizehint is ignored."""
        while True:
            line = self.readline()
            if not line: break
            yield line

    # other functions of file-likes - not used by pySerial
    #~ readinto(b)
    def seek(self, pos, whence=0):
        raise IOError("file is not seekable")

    def tell(self):
        raise IOError("file is not seekable")

    def truncate(self, n=None):
        raise IOError("file is not seekable")

    def isatty(self):
        return False
class SerialBase(object):
    """Serial port base class. Provides __init__ function and properties to
    get/set port settings."""

    # default values, may be overridden in subclasses that do not support all values
    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
                 9600, 19200, 38400, 57600, 115200, 230400, 460800, 500000,
                 576000, 921600, 1000000, 1152000, 1500000, 2000000, 2500000,
                 3000000, 3500000, 4000000)
    BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
    PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE)
    STOPBITS = (STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO)

    def __init__(self,
                 port=None,               # device number/name; opened immediately if given
                 baudrate=9600,           # baud rate
                 bytesize=EIGHTBITS,      # number of data bits
                 parity=PARITY_NONE,      # enable parity checking
                 stopbits=STOPBITS_ONE,   # number of stop bits
                 timeout=None,            # read timeout, None waits forever
                 xonxoff=False,           # enable software flow control
                 rtscts=False,            # enable RTS/CTS flow control
                 writeTimeout=None,       # write timeout
                 dsrdtr=False,            # None: follow rtscts setting
                 interCharTimeout=None    # inter-character timeout, None disables
                 ):
        """Initialize comm port object. If a port is given, then the port will be
        opened immediately. Otherwise a Serial port object in closed state
        is returned."""
        self._isOpen = False
        # real values are assigned below through the property setters so
        # every value is validated exactly once
        self._port = None
        self._baudrate = None
        self._bytesize = None
        self._parity = None
        self._stopbits = None
        self._timeout = None
        self._writeTimeout = None
        self._xonxoff = None
        self._rtscts = None
        self._dsrdtr = None
        self._interCharTimeout = None
        self.port = port
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.parity = parity
        self.stopbits = stopbits
        self.timeout = timeout
        self.writeTimeout = writeTimeout
        self.xonxoff = xonxoff
        self.rtscts = rtscts
        self.dsrdtr = dsrdtr
        self.interCharTimeout = interCharTimeout
        if port is not None:
            self.open()

    def isOpen(self):
        """Check if the port is opened."""
        return self._isOpen

    # - - - - - - - - - - - - - - - - - - - - - - - -
    # TODO: these are not really needed as there are the BAUDRATES etc. attributes...
    # maybe remove them before the final release...
    def getSupportedBaudrates(self):
        """Return (label, value) pairs for all supported baud rates."""
        return [(str(b), b) for b in self.BAUDRATES]

    def getSupportedByteSizes(self):
        """Return (label, value) pairs for all supported byte sizes."""
        return [(str(b), b) for b in self.BYTESIZES]

    def getSupportedStopbits(self):
        """Return (label, value) pairs for all supported stop bit settings."""
        return [(str(b), b) for b in self.STOPBITS]

    def getSupportedParities(self):
        """Return (label, value) pairs for all supported parity settings."""
        return [(PARITY_NAMES[b], b) for b in self.PARITIES]

    # - - - - - - - - - - - - - - - - - - - - - - - -
    def setPort(self, port):
        """Change the port. The attribute portstr is set to a string that
        contains the name of the port."""
        was_open = self._isOpen
        if was_open:
            self.close()
        if port is not None:
            if isinstance(port, str):
                self.portstr = port
            else:
                # numeric port index -> platform-specific device name
                self.portstr = self.makeDeviceName(port)
        else:
            self.portstr = None
        self._port = port
        self.name = self.portstr
        if was_open:
            self.open()

    def getPort(self):
        """Get the current port setting. The value that was passed on init or using
        setPort() is passed back. See also the attribute portstr which contains
        the name of the port as a string."""
        return self._port

    port = property(getPort, setPort, doc="Port setting")

    def setBaudrate(self, baudrate):
        """Change baud rate. It raises a ValueError if the port is open and the
        baud rate is not possible. If the port is closed, then the value is
        accepted and the exception is raised when the port is opened."""
        try:
            b = int(baudrate)
        except (TypeError, ValueError):
            # BUGFIX: int() raises ValueError (not TypeError) for strings
            # like "fast"; catch both so callers always get the
            # descriptive message below.
            raise ValueError("Not a valid baudrate: %r" % (baudrate,))
        else:
            if b <= 0:
                raise ValueError("Not a valid baudrate: %r" % (baudrate,))
            self._baudrate = b
            if self._isOpen:
                self._reconfigurePort()

    def getBaudrate(self):
        """Get the current baud rate setting."""
        return self._baudrate

    baudrate = property(getBaudrate, setBaudrate, doc="Baud rate setting")

    def setByteSize(self, bytesize):
        """Change byte size."""
        if bytesize not in self.BYTESIZES:
            raise ValueError("Not a valid byte size: %r" % (bytesize,))
        self._bytesize = bytesize
        if self._isOpen:
            self._reconfigurePort()

    def getByteSize(self):
        """Get the current byte size setting."""
        return self._bytesize

    bytesize = property(getByteSize, setByteSize, doc="Byte size setting")

    def setParity(self, parity):
        """Change parity setting."""
        if parity not in self.PARITIES:
            raise ValueError("Not a valid parity: %r" % (parity,))
        self._parity = parity
        if self._isOpen:
            self._reconfigurePort()

    def getParity(self):
        """Get the current parity setting."""
        return self._parity

    parity = property(getParity, setParity, doc="Parity setting")

    def setStopbits(self, stopbits):
        """Change stop bits size."""
        if stopbits not in self.STOPBITS:
            raise ValueError("Not a valid stop bit size: %r" % (stopbits,))
        self._stopbits = stopbits
        if self._isOpen:
            self._reconfigurePort()

    def getStopbits(self):
        """Get the current stop bits setting."""
        return self._stopbits

    stopbits = property(getStopbits, setStopbits, doc="Stop bits setting")

    def setTimeout(self, timeout):
        """Change timeout setting."""
        if timeout is not None:
            try:
                timeout + 1     # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % (timeout,))
            if timeout < 0:
                raise ValueError("Not a valid timeout: %r" % (timeout,))
        self._timeout = timeout
        if self._isOpen:
            self._reconfigurePort()

    def getTimeout(self):
        """Get the current timeout setting."""
        return self._timeout

    timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()")

    def setWriteTimeout(self, timeout):
        """Change write timeout setting."""
        if timeout is not None:
            # BUGFIX: the numeric check must come before the comparison;
            # previously "timeout < 0" on a non-number raised an
            # unhandled TypeError (inconsistent with setTimeout).
            try:
                timeout + 1     # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % timeout)
            if timeout < 0:
                raise ValueError("Not a valid timeout: %r" % (timeout,))
        self._writeTimeout = timeout
        if self._isOpen:
            self._reconfigurePort()

    def getWriteTimeout(self):
        """Get the current write timeout setting."""
        return self._writeTimeout

    writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()")

    def setXonXoff(self, xonxoff):
        """Change XON/XOFF setting."""
        self._xonxoff = xonxoff
        if self._isOpen:
            self._reconfigurePort()

    def getXonXoff(self):
        """Get the current XON/XOFF setting."""
        return self._xonxoff

    xonxoff = property(getXonXoff, setXonXoff, doc="XON/XOFF setting")

    def setRtsCts(self, rtscts):
        """Change RTS/CTS flow control setting."""
        self._rtscts = rtscts
        if self._isOpen:
            self._reconfigurePort()

    def getRtsCts(self):
        """Get the current RTS/CTS flow control setting."""
        return self._rtscts

    rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")

    def setDsrDtr(self, dsrdtr=None):
        """Change DsrDtr flow control setting."""
        if dsrdtr is None:
            # if not set, keep backwards compatibility and follow rtscts setting
            self._dsrdtr = self._rtscts
        else:
            # if defined independently, follow its value
            self._dsrdtr = dsrdtr
        if self._isOpen:
            self._reconfigurePort()

    def getDsrDtr(self):
        """Get the current DSR/DTR flow control setting."""
        return self._dsrdtr

    # BUGFIX: the doc string was passed positionally, i.e. as property()'s
    # third argument *fdel*, making "del obj.dsrdtr" try to call a str and
    # leaving the property undocumented.  It must be the doc= keyword.
    dsrdtr = property(getDsrDtr, setDsrDtr, doc="DSR/DTR flow control setting")

    def setInterCharTimeout(self, interCharTimeout):
        """Change inter-character timeout setting."""
        if interCharTimeout is not None:
            # BUGFIX: numeric check first, same reasoning as setWriteTimeout.
            try:
                interCharTimeout + 1    # test if it's a number, will throw a TypeError if not...
            except TypeError:
                raise ValueError("Not a valid timeout: %r" % interCharTimeout)
            if interCharTimeout < 0:
                raise ValueError("Not a valid timeout: %r" % interCharTimeout)
        self._interCharTimeout = interCharTimeout
        if self._isOpen:
            self._reconfigurePort()

    def getInterCharTimeout(self):
        """Get the current inter-character timeout setting."""
        return self._interCharTimeout

    interCharTimeout = property(getInterCharTimeout, setInterCharTimeout, doc="Inter-character timeout setting for read()")

    # - - - - - - - - - - - - - - - - - - - - - - - -
    _SETTINGS = ('baudrate', 'bytesize', 'parity', 'stopbits', 'xonxoff',
                 'dsrdtr', 'rtscts', 'timeout', 'writeTimeout', 'interCharTimeout')

    def getSettingsDict(self):
        """Get current port settings as a dictionary. For use with
        applySettingsDict"""
        return dict([(key, getattr(self, '_' + key)) for key in self._SETTINGS])

    def applySettingsDict(self, d):
        """apply stored settings from a dictionary returned from
        getSettingsDict. it's allowed to delete keys from the dictionary. these
        values will simply left unchanged."""
        for key in self._SETTINGS:
            # BUGFIX: guard with "key in d" — the docstring promises that
            # deleted keys are skipped, but d[key] raised KeyError.
            if key in d and d[key] != getattr(self, '_' + key):  # check against internal "_" value
                setattr(self, key, d[key])      # set non "_" value to use properties write function

    # - - - - - - - - - - - - - - - - - - - - - - - -
    def __repr__(self):
        """String representation of the current port settings and its state."""
        return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
            self.__class__.__name__,
            id(self),
            self._isOpen,
            self.portstr,
            self.baudrate,
            self.bytesize,
            self.parity,
            self.stopbits,
            self.timeout,
            self.xonxoff,
            self.rtscts,
            self.dsrdtr,
        )

    # - - - - - - - - - - - - - - - - - - - - - - - -
    # compatibility with io library
    def readable(self): return True
    def writable(self): return True
    def seekable(self): return False

    def readinto(self, b):
        """Read up to len(b) bytes into buffer *b*; return the byte count."""
        data = self.read(len(b))
        n = len(data)
        try:
            b[:n] = data
        except TypeError as err:
            # array.array does not accept bytes in a slice assignment on
            # some versions; convert and retry for that one case only.
            import array
            if not isinstance(b, array.array):
                raise err
            b[:n] = array.array('b', data)
        return n
# Self-test: when run as a script, construct a closed SerialBase with
# default settings and print them.
if __name__ == '__main__':
    import sys
    s = SerialBase()
    sys.stdout.write('port name: %s\n' % s.portstr)
    sys.stdout.write('baud rates: %s\n' % s.getSupportedBaudrates())
    sys.stdout.write('byte sizes: %s\n' % s.getSupportedByteSizes())
    sys.stdout.write('parities: %s\n' % s.getSupportedParities())
    sys.stdout.write('stop bits: %s\n' % s.getSupportedStopbits())
    sys.stdout.write('%s\n' % s)
| |
#
# See top-level LICENSE.rst file for Copyright information
#
"""
desispec.workflow.schedule
==========================
Tools for scheduling MPI jobs using mpi4py
"""
import numpy as np
from logging import getLogger
class Schedule:
    def __init__(self, workfunc, comm=None, njobs=2, group_size=1):
        """
        Initialize class for scheduling MPI jobs using mpi4py

        Args:
            workfunc: function to do each MPI job defined using
                def workfunc(groupcomm,job):
                where groupcomm is an MPI communicator
                and job is an integer in the range 0 to njobs - 1

        Keyword Args:
            comm: MPI communicator (default=None)
            njobs: number of jobs (default=2)
            group_size: number of MPI processes per job (default=1)

        Initialization of this class results in ngroups = (comm.Get_size() - 1) // group_size
        new communicators (groups) being created, each with size group_size,
        using comm.Split.

        Functionality of this class is provided via the Schedule.run method. The
        process in comm with rank = 0 will be dedicated to scheduling, while processes
        with rank > ngroups * group_size will remain idle, and processes with
        0 < rank < ngroups * group_size will run workfunc in groups of size group_size.

        In the case njobs >= ngroups, all ranks in each of the groups will first be
        assigned to call workfunc with arguments job = 0 to job = ngroups-1, in
        parallel. The first group to finish workfunc will then call workfunc with
        job = ngroups, the next group to finish will call workfunc with
        job = ngroups + 1, and so on, until workfunc has returned for all njobs values
        of job.

        In the case njobs < ngroups, only ranks assigned to the first njobs groups
        will run workfunc, while the rest will remain idle, until workfunc has returned
        for all njobs values of job.
        """
        # user provided function that will do the work
        self._workfunc = workfunc
        self.comm = comm            # NOTE(review): a real communicator is required;
                                    # comm=None fails below on Get_rank()
        self.njobs = njobs
        self.group_size = group_size
        self.rank = self.comm.Get_rank()
        self.size = self.comm.Get_size()
        self.log = getLogger(__name__)
        # single-element numpy buffer for sending and receiving job indices
        self.job_buff = np.zeros(1, dtype=np.int32)
        if self.group_size > self.size - 1:
            raise Exception("can't have group_size larger than world size - 1")
        # set number of groups and group for this rank
        self.ngroups = (self.size - 1) // self.group_size
        self.group = (self.rank - 1) // self.group_size
        # assign rank=0 and 'extra' ranks to group ngroups;
        # only ranks with group < ngroups participate as workers
        if self.rank > self.group_size * self.ngroups or self.rank == 0:
            self.group = self.ngroups
        # generate a new communicator for ngroups processes of size group_size
        self.groupcomm = self.comm.Split(color=self.group)
        # check for consistency between specified group_size and that of new communicator
        if self.group_size != self.groupcomm.Get_size() and self.rank != 0:
            self.log.error(f'FAILED: rank {self.rank} with group_size = '+
                           f'{self.group_size} and groupcomm.Get_size() returning '+
                           f'{self.groupcomm.Get_size()}')
            raise Exception("inconsistent group size")

    def _assign_job(self, worker, job):
        """
        Assign job to a group of processes

        Args:
            worker: index of group of processes
            job: index of job to be assigned (job < 0 dismisses the group)

        Returns:
            reqs: list of mpi4py.MPI.Request objects, one for each process in group
        """
        # assign job to all processes in group worker and return handles
        # corresponding to confirmation of completion of the current job
        reqs = []
        for grouprank in range(self.group_size):
            destrank = worker * self.group_size + grouprank + 1
            self.job_buff[0] = job
            self.comm.Send(self.job_buff, dest=destrank)
            # dismissal messages (job < 0) get no completion handle
            if job >= 0: reqs.append(self.comm.Irecv(self.job_buff, source=destrank))
        return reqs

    def _checkreqlist(self, reqs):
        """
        Check for completion of jobs by all processes in group

        Args:
            reqs: list of mpi4py.MPI.Request objects, one for each process in group

        Returns:
            bool: True if all messages corresponding to reqs received, False otherwise
        """
        for req in reqs:
            if not req.Test():
                return False
        return True

    def _schedule(self):
        """
        Schedule, run and assign processes for all jobs in this object
        """
        # bookkeeping
        waitlist = []       # message handles for pending worker groups
        worker_groups = []  # worker assigned
        # start by assigning one job to each group.
        # BUGFIX: cap the initial dispatch at njobs — the previous
        # range(self.ngroups) sent job indices >= njobs to workers when
        # njobs < ngroups, violating the documented contract that only
        # the first njobs groups run workfunc.
        nextjob = 0
        for worker in range(min(self.ngroups, self.njobs)):
            reqs = self._assign_job(worker, nextjob)
            waitlist.append(reqs)
            worker_groups.append(worker)
            nextjob += 1
        # the scheduler waits for jobs to be completed; when one is
        # complete it assigns the next job until there are none left
        Ncomplete = 0
        while Ncomplete < self.njobs:
            # iterate over the list of currently pending groups of processes
            for i in range(len(waitlist)):
                # check for completion of all processes in this worker group
                if self._checkreqlist(waitlist[i]):
                    # all ranks in the group doing the job at place i in the
                    # waitlist have returned; identify this worker group and
                    # remove it from the waitlist and worker list
                    worker = worker_groups[i]
                    Ncomplete += 1
                    waitlist.pop(i)
                    worker_groups.pop(i)
                    if nextjob < self.njobs:
                        # more jobs to do; assign processes in group worker
                        # the job with index nextjob, increment
                        reqs = self._assign_job(worker, nextjob)
                        waitlist.append(reqs)
                        worker_groups.append(worker)
                        nextjob += 1
                    # waitlist has been modified so exit waitlist loop with break
                    break
        # no more jobs to assign; dismiss all processes in all groups by
        # assigning job = -1, causing all worker processes to return
        for worker in range(self.ngroups):
            self._assign_job(worker, -1)
        return

    def _work(self):
        """
        Listen for job assignments and run workfunc
        """
        # listen for job assignments from the scheduler
        while True:
            self.comm.Recv(self.job_buff, source=0)  # receive assignment from rank=0 scheduler
            job = self.job_buff[0]                   # unpack job index
            if job < 0: return                       # job < 0 means no more jobs to do
            try:
                self._workfunc(self.groupcomm, job)  # call work function for job
            except Exception as e:
                # a failed job is logged but does not kill the schedule
                self.log.error(f'FAILED: call to workfunc for job {job}'+
                               f' on rank {self.rank}')
                self.log.error(e)
            self.comm.Isend(self.job_buff, dest=0)   # send non-blocking message on completion
        return

    def run(self):
        """
        Run schedulers and workers for this object
        """
        # main function of class
        if self.rank == 0:
            self._schedule()               # run scheduler on rank = 0
        elif self.group < self.ngroups:
            self._work()                   # run worker on all other ranks
        self.comm.barrier()
        return
| |
#!/usr/bin/env python
"""
Count Muts Unique
by Brendan Kohrn and Mike Schmitt
Version 1.41
August 20, 2014
Modified from count-muts.py and count-muts-unique.py
Edited by Brendan Kohrn to fix a problem with 0 depth where no 0 depth should be and to allow n-length indels, and to merge the two conditions into one program.
This script pulls out the mutation frequencies from a pileup file given as stdin, or can take an input file using the -i option, and writes to stdout, or can take an output file name.
Sites with less than MINDEPTH, or clonalities outside of the range MIN_CLONALITY-MAX_CLONALITY, are excluded from analysis.
If -u is specified, this program counts each mutation exactly once (i.e. clonal expansions are counted as a single mutation)
Usage:
cat seq.pileup | CountMuts.py [-h] [-d MINDEPTH] [-C MAX_CLONALITY] [-c MIN_CLONALITY] [-n N_CUTOFF] [-s START] [-e END] [-u] > outfile.countmuts
optional arguments:
-h, --help show this help message and exit
-d MINDEPTH, --depth MINDEPTH
Minimum depth for counting mutations at a site
(default = 20)
-C MAX_CLONALITY, --max_clonality MAX_CLONALITY
Cutoff of mutant reads for scoring a clonal mutation
(default = 0.3)
-c MIN_CLONALITY, --min_clonality MIN_CLONALITY
Cutoff of mutant reads for scoring a clonal mutation
(default = 0)
-n N_CUTOFF, --n_cutoff N_CUTOFF
Maximum fraction of N's allowed to score a position
(default = 0.05)
-s START, --start START
Position at which to start scoring for mutations
(default = 0)
-e END, --end END Position at which to stop scoring for mutations. If
set to 0, no position filtering will be performed
(default = 0)
-u, --unique run countMutsUnique instead of countMuts
"""
from __future__ import print_function
from argparse import ArgumentParser
import sys
import re
from math import sqrt
def Wilson(positive, total):
    """Return the 95% Wilson score interval for a binomial proportion.

    Args:
        positive: number of successes (e.g. mutant reads).
        total: number of trials (e.g. total reads).

    Returns:
        (frequency, upper CI bound, lower CI bound) as floats.  When
        total == 0 a degenerate (0.0, 0.0, 0.0) is returned.
    """
    if total == 0:
        # BUGFIX: this path printed a leftover debug message ("Hi") and
        # returned a scalar 0, which would crash the callers' three-field
        # '%.2e\t%.2e\t%.2e' formatting; return a 3-tuple like the normal
        # path instead.
        return (0.0, 0.0, 0.0)
    z = 1.96  # z-score for a 95% confidence level
    phat = float(positive) / total
    center = phat + z * z / (2 * total)
    spread = z * sqrt((phat * (1 - phat) + z * z / (4 * total)) / total)
    denom = 1 + z * z / total
    positiveCI = (center + spread) / denom
    negativeCI = (center - spread) / denom
    return (phat, positiveCI, negativeCI)
def CountMutations(o, f, fOut):
    """Tally point mutations and indels from a pileup stream and write a report.

    Args:
        o: parsed options namespace with attributes mindepth, min_clonality,
           max_clonality, n_cutoff, start, end and unique.
        f: iterable of samtools-pileup lines (chrom, pos, ref, depth, calls, ...).
        fOut: writable text stream receiving the formatted report.
    """
    def _score(calls, base, unique):
        # Number of mutant reads supporting `base`; in unique mode a clonal
        # expansion counts as a single mutation event.
        n = calls.count(base)
        if n == 0:
            return 0
        return 1 if unique else n

    Aseq = Tseq = Cseq = Gseq = 0
    AtoT = AtoC = AtoG = 0
    TtoA = TtoC = TtoG = 0
    CtoA = CtoT = CtoG = 0
    GtoA = GtoT = GtoC = 0
    ins = {0: 0}   # insertion length -> event count
    dels = {0: 0}  # deletion length -> event count
    for line in f:
        linebins = line.split()
        # convert sequence information to uppercase
        linebins[4] = linebins[4].replace('t', 'T')
        linebins[4] = linebins[4].replace('c', 'C')
        linebins[4] = linebins[4].replace('g', 'G')
        linebins[4] = linebins[4].replace('a', 'A')
        linebins[4] = linebins[4].replace('n', 'N')
        # count depth, excluding N calls
        depth = int(linebins[3]) - linebins[4].count('N')
        # count and remove insertions.
        # BUGFIX: list() is required — on Python 3, map() returns a one-shot
        # iterator that the loop below would exhaust, making the later
        # newIns.count(...) calls fail with AttributeError.
        newIns = list(map(int, re.findall(r'\+\d+', linebins[4])))
        if o.unique:
            newIns = list(set(newIns))
        for length in newIns:
            ins[length] = ins.get(length, 0) + 1
            # '.'*length matches the inserted bases following the +N tag
            linebins[4] = re.sub(r'\+' + str(length) + "." * length, '', linebins[4])
        # count and remove deletions.
        # BUGFIX: findall() already yields a list of strings; the previous
        # map(str, ...) produced a one-shot iterator on Python 3 (same
        # failure mode as above).
        newDels = re.findall(r'-\d+', linebins[4])
        if o.unique:
            newDels = list(set(newDels))
        for tag in newDels:
            length = int(tag[1:])
            dels[length] = dels.get(length, 0) + 1
            linebins[4] = re.sub(r'-' + str(length) + "." * length, '', linebins[4])
        # skip sites outside the requested start/end range, with insufficient
        # depth, excess N fraction, or clonality outside the accepted window
        nCount = linebins[4].count('N')
        totalReads = float(depth) + float(nCount)
        if o.end != 0 and int(linebins[1]) < o.start:
            continue
        if o.end != 0 and int(linebins[1]) > o.end:
            continue
        # BUGFIX: guard totalReads == 0 / depth <= 0 — a zero-depth site
        # previously raised ZeroDivisionError here and in the clonality test.
        if totalReads == 0 or (float(nCount) / totalReads) > o.n_cutoff:
            continue
        if depth < o.mindepth or depth <= 0:
            continue
        maxBase = max(linebins[4].count('T'), linebins[4].count('C'),
                      linebins[4].count('G'), linebins[4].count('A'))
        maxIns = max(newIns.count(n) for n in set(newIns)) if newIns else 0
        maxDels = max(newDels.count(m) for m in set(newDels)) if newDels else 0
        clonality = float(max(maxBase, maxIns, maxDels)) / float(depth)
        if clonality > o.max_clonality or clonality < o.min_clonality:
            continue
        # remove end-of-read markers and start-of-read markers (with the
        # mapping-quality character that follows '^')
        linebins[4] = re.sub(r'\$', '', linebins[4])
        linebins[4] = re.sub(r'\^.', '', linebins[4])
        # count point mutations relative to the reference base
        ref = linebins[2]
        if ref == 'A':
            Aseq += depth
            AtoT += _score(linebins[4], 'T', o.unique)
            AtoC += _score(linebins[4], 'C', o.unique)
            AtoG += _score(linebins[4], 'G', o.unique)
        elif ref == 'T':
            Tseq += depth
            TtoA += _score(linebins[4], 'A', o.unique)
            TtoC += _score(linebins[4], 'C', o.unique)
            TtoG += _score(linebins[4], 'G', o.unique)
        elif ref == 'C':
            Cseq += depth
            CtoA += _score(linebins[4], 'A', o.unique)
            CtoT += _score(linebins[4], 'T', o.unique)
            CtoG += _score(linebins[4], 'G', o.unique)
        elif ref == 'G':
            Gseq += depth
            GtoA += _score(linebins[4], 'A', o.unique)
            GtoT += _score(linebins[4], 'T', o.unique)
            GtoC += _score(linebins[4], 'C', o.unique)
    totalseq = Aseq + Tseq + Cseq + Gseq
    totalptmut = (AtoT + AtoC + AtoG + TtoA + TtoC + TtoG +
                  CtoA + CtoT + CtoG + GtoA + GtoT + GtoC)
    # BUGFIX: sum the event counts (dict values); the previous
    # sum(ins) + sum(dels) summed the indel *length* keys.
    totalins = sum(ins.values())
    totaldels = sum(dels.values())
    # Output format: mutation type, observation count, frequency, 95%
    # positive CI, 95% negative CI (Wilson score intervals).
    print("\nMinimum depth: %s" % o.mindepth, file = fOut)
    print("Clonality: %s - %s" % (o.min_clonality, o.max_clonality), file = fOut)
    if o.end != 0:
        print('Position: %s - %s' % (o.start, o.end), file = fOut)
    if o.unique:
        print('Unique Counts', file = fOut)
    print("\nA's sequenced: %s" % Aseq, file = fOut)
    print("Mutation type\t#\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    print(("A to T:\t%s" % AtoT) + ('\t%.2e\t%.2e\t%.2e' % Wilson(AtoT, max(Aseq, 1))), file = fOut)
    print(("A to C:\t%s" % AtoC) + ('\t%.2e\t%.2e\t%.2e' % Wilson(AtoC, max(Aseq, 1))), file = fOut)
    print(("A to G:\t%s" % AtoG) + ('\t%.2e\t%.2e\t%.2e' % Wilson(AtoG, max(Aseq, 1))), file = fOut)
    print("\nT's sequenced: %s" % Tseq, file = fOut)
    print("Mutation type\t#\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    print(("T to A:\t%s" % TtoA) + ('\t%.2e\t%.2e\t%.2e' % Wilson(TtoA, max(Tseq, 1))), file = fOut)
    print(("T to C:\t%s" % TtoC) + ('\t%.2e\t%.2e\t%.2e' % Wilson(TtoC, max(Tseq, 1))), file = fOut)
    print(("T to G:\t%s" % TtoG) + ('\t%.2e\t%.2e\t%.2e' % Wilson(TtoG, max(Tseq, 1))), file = fOut)
    print("\nC's sequenced: %s" % Cseq, file = fOut)
    print("Mutation type\t#\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    print(("C to A:\t%s" % CtoA) + ('\t%.2e\t%.2e\t%.2e' % Wilson(CtoA, max(Cseq, 1))), file = fOut)
    print(("C to T:\t%s" % CtoT) + ('\t%.2e\t%.2e\t%.2e' % Wilson(CtoT, max(Cseq, 1))), file = fOut)
    print(("C to G:\t%s" % CtoG) + ('\t%.2e\t%.2e\t%.2e' % Wilson(CtoG, max(Cseq, 1))), file = fOut)
    print("\nG's sequenced: %s" % Gseq, file = fOut)
    print("Mutation type\t#\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    print(("G to A:\t%s" % GtoA) + ('\t%.2e\t%.2e\t%.2e' % Wilson(GtoA, max(Gseq, 1))), file = fOut)
    print(("G to T:\t%s" % GtoT) + ('\t%.2e\t%.2e\t%.2e' % Wilson(GtoT, max(Gseq, 1))), file = fOut)
    print(("G to C:\t%s" % GtoC) + ('\t%.2e\t%.2e\t%.2e' % Wilson(GtoC, max(Gseq, 1))), file = fOut)
    print("\nTotal nucleotides sequenced: %s" % totalseq, file = fOut)
    print("Total point mutations: %s" % totalptmut, file = fOut)
    print("\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    print('Overall point mutation frequency:\t%.2e\t%.2e\t%.2e\n' % Wilson(totalptmut, max(totalseq, 1)), file = fOut)
    insKeys = sorted(ins.items(), key=lambda x: x[0])
    print("Mutation type\t#\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    for n in insKeys:
        print(('+%s insertions: %s' % (n[0], n[1])) + ('\t%.2e\t%.2e\t%.2e' % Wilson(n[1], max(totalseq, 1))), file = fOut)
    if dels != {}:
        print('', file = fOut)
        delsKeys = sorted(dels.items(), key=lambda x: x[0])
        for n in delsKeys:
            print(('-%s deletions: %s' % (n[0], n[1])) + ('\t%.2e\t%.2e\t%.2e' % Wilson(n[1], max(totalseq, 1))), file = fOut)
    print("\nTotal insertion events: %s" % totalins, file = fOut)
    print("\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    print("Overall insert frequency:\t%.2e\t%.2e\t%.2e" % Wilson(totalins, max(totalseq, 1)), file = fOut)
    print("\nTotal deletion events: %s" % totaldels, file = fOut)
    print("\tFrequency\t95% positive CI\t95% negative CI", file = fOut)
    print("Overall deletion frequency:\t%.2e\t%.2e\t%.2e" % Wilson(totaldels, max(totalseq, 1)), file = fOut)
def main():
    """Parse command-line options, open the streams and run the counter.

    Reads pileup lines from --infile (or stdin) and writes the report to
    --outfile (or stdout).  Files opened here are closed on exit.
    """
    parser = ArgumentParser()
    # BUGFIX: "imput" typo corrected to "input" in the help text
    parser.add_argument('-i', '--infile', action='store', dest='inFile', help='An input file. If None, defaults to stdin. [None]', default=None)
    parser.add_argument('-o', '--outfile', action='store', dest='outFile', help='A filename for the output file. If None, outputs to stdout. [None]', default=None)
    parser.add_argument("-d", "--depth", action="store", type=int, dest="mindepth",
                        help="Minimum depth for counting mutations at a site [%(default)s]", default=20)
    parser.add_argument("-c", "--min_clonality", action="store", type=float, dest="min_clonality",
                        help="Cutoff of mutant reads for scoring a clonal mutation [%(default)s]", default=0)
    parser.add_argument("-C", "--max_clonality", action="store", type=float, dest="max_clonality",
                        help="Cutoff of mutant reads for scoring a clonal mutation [%(default)s]", default=0.3)
    parser.add_argument("-n", "--n_cutoff", action="store", type=float, dest="n_cutoff",
                        help="Maximum fraction of N's allowed to score a position [%(default)s]", default=0.05)
    parser.add_argument("-s", "--start", action="store", type=int, dest="start",
                        help="Position at which to start scoring for mutations [%(default)s]", default=0)
    parser.add_argument("-e", "--end", action="store", type=int, dest="end",
                        help="Position at which to stop scoring for mutations. If set to 0, no position filtering will be performed [%(default)s]", default=0)
    parser.add_argument('-u', '--unique', action='store_true', dest='unique', help='Run countMutsUnique instead of countMuts')
    o = parser.parse_args()
    f = open(o.inFile, 'r') if o.inFile is not None else sys.stdin
    fOut = open(o.outFile, 'w') if o.outFile is not None else sys.stdout
    try:
        CountMutations(o, f, fOut)
    finally:
        # BUGFIX: close files we opened (but never the std streams) —
        # previously both handles were leaked.
        if f is not sys.stdin:
            f.close()
        if fOut is not sys.stdout:
            fOut.close()

if __name__ == "__main__":
    main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class OrdersOperations(object):
"""OrdersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to issue HTTP requests to the service.
        self._client = client
        # msrest-style serializer/deserializer for request and response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (holds e.g. subscription_id).
        self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OrderList"]
"""Lists all the orders related to a Data Box Edge/Data Box Gateway device.
Lists all the orders related to a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OrderList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01_preview.models.OrderList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OrderList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OrderList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders'} # type: ignore
def get(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Order"
"""Gets a specific order by name.
Gets a specific order by name.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Order, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.Order
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Order"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Order', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def _create_or_update_initial(
self,
device_name, # type: str
resource_group_name, # type: str
order, # type: "_models.Order"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.Order"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Order"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(order, 'Order')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Order', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def begin_create_or_update(
self,
device_name, # type: str
resource_group_name, # type: str
order, # type: "_models.Order"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Order"]
"""Creates or updates an order.
Creates or updates an order.
:param device_name: The order details of a device.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param order: The order to be created or updated.
:type order: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.Order
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Order or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2020_09_01_preview.models.Order]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Order"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
device_name=device_name,
resource_group_name=resource_group_name,
order=order,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Order', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def _delete_initial(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def begin_delete(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the order related to the device.
Deletes the order related to the device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default'} # type: ignore
def list_dc_access_code(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DCAccessCode"
"""Gets the DCAccess Code.
Gets the DCAccess Code.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DCAccessCode, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.DCAccessCode
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DCAccessCode"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self.list_dc_access_code.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DCAccessCode', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_dc_access_code.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/orders/default/listDCAccessCode'} # type: ignore
| |
import warnings
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from menpo.transform import (Affine, Similarity, Rotation, Scale,
NonUniformScale, UniformScale, Translation,
Homogeneous)
from nose.tools import raises
@raises(ValueError)
def test_1d_translation():
t_vec = np.array([1])
Translation(t_vec)
@raises(ValueError)
def test_5d_translation():
t_vec = np.ones(5)
Translation(t_vec)
def test_translation():
    """Applying a Translation should add the offset vector to every point."""
    offset = np.array([1, 2, 3])
    points = np.random.rand(10, 3)
    shifted = Translation(offset).apply(points)
    assert_allclose(points + offset, shifted)
def test_basic_2d_rotation():
    """A 2D rotation by this matrix should map (1, 0) onto (0, -1)."""
    matrix = np.array([[0, 1],
                       [-1, 0]])
    rotated = Rotation(matrix).apply(np.array([1, 0]))
    assert_allclose(np.array([0, -1]), rotated)
def test_basic_2d_rotation_axis_angle():
rotation_matrix = np.array([[0, 1],
[-1, 0]])
rotation = Rotation(rotation_matrix)
axis, angle = rotation.axis_and_angle_of_rotation()
assert_allclose(axis, np.array([0, 0, 1]))
assert_allclose((90 * np.pi)/180, angle)
def test_basic_3d_rotation():
a = np.sqrt(3.0)/2.0
b = 0.5
# this is a rotation of -30 degrees about the x axis
rotation_matrix = np.array([[1, 0, 0],
[0, a, b],
[0, -b, a]])
rotation = Rotation(rotation_matrix)
starting_vector = np.array([0, 1, 0])
transformed = rotation.apply(starting_vector)
assert_allclose(np.array([0, a, -b]), transformed)
def test_basic_3d_rotation_axis_angle():
a = np.sqrt(3.0)/2.0
b = 0.5
# this is a rotation of -30 degrees about the x axis
rotation_matrix = np.array([[1, 0, 0],
[0, a, b],
[0, -b, a]])
rotation = Rotation(rotation_matrix)
axis, angle = rotation.axis_and_angle_of_rotation()
assert_allclose(axis, np.array([1, 0, 0]))
assert_allclose((-30 * np.pi)/180, angle)
def test_3d_rotation_inverse_eye():
a = np.sqrt(3.0)/2.0
b = 0.5
# this is a rotation of -30 degrees about the x axis
rotation_matrix = np.array([[1, 0, 0],
[0, a, b],
[0, -b, a]])
rotation = Rotation(rotation_matrix)
transformed = rotation.compose_before(rotation.pseudoinverse())
assert_allclose(np.eye(4), transformed.h_matrix, atol=1e-15)
def test_basic_2d_affine():
linear_component = np.array([[1, -6],
[-3, 2]])
translation_component = np.array([7, -8])
h_matrix = np.eye(3, 3)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
affine = Affine(h_matrix)
x = np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]])
# transform x explicitly
solution = np.dot(x, linear_component.T) + translation_component
# transform x using the affine transform
result = affine.apply(x)
# check that both answers are equivalent
assert_allclose(solution, result)
# create several copies of x
x_copies = np.array([x, x, x, x, x, x, x, x])
# transform all of copies at once using the affine transform
results = affine.apply(x_copies)
# check that all copies have been transformed correctly
for r in results:
assert_allclose(solution, r)
def test_basic_3d_affine():
linear_component = np.array([[1, 6, -4],
[-3, -2, 5],
[5, -1, 3]])
translation_component = np.array([7, -8, 9])
h_matrix = np.eye(4, 4)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
affine = Affine(h_matrix)
x = np.array([[0, 1, 2],
[1, 1, 1],
[-1, 2, -5],
[1, -5, -1]])
# transform x explicitly
solution = np.dot(x, linear_component.T) + translation_component
# transform x using the affine transform
result = affine.apply(x)
# check that both answers are equivalent
assert_allclose(solution, result)
# create several copies of x
x_copies = np.array([x, x, x, x, x, x, x, x])
# transform all of copies at once using the affine transform
results = affine.apply(x_copies)
# check that all copies have been transformed correctly
for r in results:
assert_allclose(solution, r)
def test_basic_2d_similarity():
linear_component = np.array([[2, -6],
[6, 2]])
translation_component = np.array([7, -8])
h_matrix = np.eye(3, 3)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
similarity = Similarity(h_matrix)
x = np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]])
# transform x explicitly
solution = np.dot(x, linear_component.T) + translation_component
# transform x using the affine transform
result = similarity.apply(x)
# check that both answers are equivalent
assert_allclose(solution, result)
# create several copies of x
x_copies = np.array([x, x, x, x, x, x, x, x])
# transform all of copies at once using the affine transform
results = similarity.apply(x_copies)
# check that all copies have been transformed correctly
for r in results:
assert_allclose(solution, r)
def test_similarity_2d_from_vector():
params = np.array([0.2, 0.1, 1, 2])
homo = np.array([[params[0] + 1, -params[1], params[2]],
[params[1], params[0] + 1, params[3]],
[0, 0, 1]])
sim = Similarity.init_identity(2).from_vector(params)
assert_equal(sim.h_matrix, homo)
def test_similarity_2d_as_vector():
params = np.array([0.2, 0.1, 1.0, 2.0])
homo = np.array([[params[0] + 1.0, -params[1], params[2]],
[params[1], params[0] + 1.0, params[3]],
[0.0, 0.0, 1.0]])
vec = Similarity(homo).as_vector()
assert_allclose(vec, params)
def test_translation_2d_from_vector():
params = np.array([1, 2])
homo = np.array([[1, 0, params[0]],
[0, 1, params[1]],
[0, 0, 1]])
tr = Translation.init_identity(2).from_vector(params)
assert_equal(tr.h_matrix, homo)
def test_translation_2d_as_vector():
params = np.array([1, 2])
vec = Translation(params).as_vector()
assert_allclose(vec, params)
def test_translation_3d_from_vector():
params = np.array([1, 2, 3])
homo = np.array([[1, 0, 0, params[0]],
[0, 1, 0, params[1]],
[0, 0, 1, params[2]],
[0, 0, 0, 1]])
tr = Translation.init_identity(3).from_vector(params)
assert_equal(tr.h_matrix, homo)
def test_translation_3d_as_vector():
params = np.array([1, 2, 3])
vec = Translation(params).as_vector()
assert_allclose(vec, params)
def test_uniformscale2d_update_from_vector():
# make a uniform scale of 1, 2 dimensional
uniform_scale = UniformScale(1, 2)
new_scale = 2
homo = np.array([[new_scale, 0, 0],
[0, new_scale, 0],
[0, 0, 1]])
uniform_scale._from_vector_inplace(new_scale)
assert_equal(uniform_scale.h_matrix, homo)
def test_uniformscale2d_as_vector():
scale = 2
vec = UniformScale(scale, 2).as_vector()
assert_allclose(vec, scale)
def test_nonuniformscale2d_from_vector():
scale = np.array([1, 2])
homo = np.array([[scale[0], 0, 0],
[0, scale[1], 0],
[0, 0, 1]])
tr = NonUniformScale.init_identity(2).from_vector(scale)
assert_equal(tr.h_matrix, homo)
def test_nonuniformscale2d_update_from_vector():
scale = np.array([3, 4])
homo = np.array([[scale[0], 0, 0],
[0, scale[1], 0],
[0, 0, 1]])
tr = NonUniformScale(np.array([1, 2]))
tr._from_vector_inplace(scale)
assert_equal(tr.h_matrix, homo)
def test_nonuniformscale2d_as_vector():
scale = np.array([1, 2])
vec = NonUniformScale(scale).as_vector()
assert_allclose(vec, scale)
def test_uniformscale3d_from_vector():
scale = 2
homo = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
uniform_scale = UniformScale(1, 3)
tr = uniform_scale.from_vector(scale)
assert_equal(tr.h_matrix, homo)
def test_uniformscale3d_as_vector():
scale = 2
vec = UniformScale(scale, 3).as_vector()
assert_allclose(vec, scale)
def test_uniformscale_build_2d():
scale = 2
homo = np.array([[scale, 0, 0],
[0, scale, 0],
[0, 0, 1]])
tr = UniformScale(scale, 2)
assert_equal(tr.h_matrix, homo)
def test_uniformscale_build_3d():
scale = 2
homo = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
tr = UniformScale(scale, 3)
assert(isinstance(tr, UniformScale))
assert_equal(tr.h_matrix, homo)
@raises(ValueError)
def test_uniformscale_build_4d_raise_dimensionalityerror():
UniformScale(1, 4)
def test_scale_build_2d_uniform_pass_dim():
scale = 2
ndim = 2
tr = Scale(scale, ndim)
assert(isinstance(tr, UniformScale))
def test_scale_build_3d_uniform_pass_dim():
scale = 2
ndim = 3
tr = Scale(scale, ndim)
assert(isinstance(tr, UniformScale))
def test_scale_build_2d_nonuniform():
scale = np.array([1, 2])
tr = Scale(scale)
assert(isinstance(tr, NonUniformScale))
def test_scale_build_2d_uniform_from_vec():
scale = np.array([2, 2])
tr = Scale(scale)
assert(isinstance(tr, UniformScale))
@raises(ValueError)
def test_scale_zero_scale_raise_valuerror():
Scale(np.array([1, 0]))
# Vectorizable interface tests
@raises(NotImplementedError)
def test_rotation2d_from_vector_raises_notimplementederror():
Rotation.init_identity(2).from_vector(0)
@raises(NotImplementedError)
def test_rotation2d_as_vector_raises_notimplementederror():
Rotation.init_identity(2).as_vector()
def test_affine_2d_n_parameters():
homo = np.eye(3)
t = Affine(homo)
assert(t.n_parameters == 6)
def test_affine_2d_n_dims_output():
homo = np.eye(3)
t = Affine(homo)
assert(t.n_dims_output == 2)
def test_affine_3d_n_parameters():
homo = np.eye(4)
t = Affine(homo)
assert(t.n_parameters == 12)
def test_similarity_2d_n_parameters():
homo = np.eye(3)
t = Similarity(homo)
assert(t.n_parameters == 4)
@raises(NotImplementedError)
def test_similarity_3d_n_parameters_raises_notimplementederror():
homo = np.eye(4)
t = Similarity(homo)
# Raises exception
t.n_parameters
def test_uniformscale2d_n_parameters():
scale = 2
t = UniformScale(scale, 2)
assert(t.n_parameters == 1)
def test_uniformscale3d_n_parameters():
scale = 2
t = UniformScale(scale, 3)
assert(t.n_parameters == 1)
def test_nonuniformscale_2d_n_parameters():
scale = np.array([1, 2])
t = NonUniformScale(scale)
assert(t.n_parameters == 2)
def test_translation_2d_n_parameters():
trans = np.array([1, 2])
t = Translation(trans)
assert(t.n_parameters == 2)
def test_translation_3d_n_parameters():
trans = np.array([1, 2, 3])
t = Translation(trans)
assert(t.n_parameters == 3)
@raises(NotImplementedError)
def test_rotation2d_n_parameters_raises_notimplementederror():
rot_matrix = np.eye(2)
t = Rotation(rot_matrix)
t.n_parameters
# Test list construction is equivalent to ndarray construction
def test_translation_from_list():
t_a = Translation([3, 4])
t_b = Translation(np.array([3, 4]))
assert(np.all(t_a.h_matrix == t_b.h_matrix))
def test_nonuniformscale_from_list():
u_a = NonUniformScale([3, 2, 3])
u_b = NonUniformScale(np.array([3, 2, 3]))
assert(np.all(u_a.h_matrix == u_b.h_matrix))
# Test set_h_matrix is deprecated (and disabled)
@raises(NotImplementedError)
def test_homogeneous_set_h_matrix_raises_notimplementederror():
    """set_h_matrix is disabled and must raise NotImplementedError.

    Renamed from ``test_homogenous_...`` to fix the spelling and match the
    naming of every other ``test_homogeneous_*`` test in this module.
    """
    s = Homogeneous(np.eye(4))
    with warnings.catch_warnings():
        # set_h_matrix also emits a deprecation warning; silence it so the
        # test only observes the NotImplementedError.
        warnings.simplefilter("ignore")
        s.set_h_matrix(s.h_matrix)
def test_homogeneous_print():
e = np.eye(3)
h = Homogeneous(e)
assert(str(h) == 'Homogeneous\n[[ 1. 0. 0.]\n [ 0. 1. 0.]'
'\n [ 0. 0. 1.]]')
def test_homogeneous_eye():
e = np.eye(3)
h = Homogeneous.init_identity(2)
assert_allclose(e, h.h_matrix)
def test_homogeneous_has_true_inverse():
h = Homogeneous.init_identity(2)
assert h.has_true_inverse
def test_homogeneous_inverse():
    """pseudoinverse() of a pure scale-by-2 should be a pure scale-by-0.5."""
    double = np.eye(3) * 2
    double[2, 2] = 1
    half = np.eye(3) * 0.5
    half[2, 2] = 1
    assert_allclose(Homogeneous(double).pseudoinverse().h_matrix, half)
def test_homogeneous_apply():
    """Homogeneous transform: scale points by 2 then translate by (2, 3)."""
    matrix = np.eye(3) * 2
    matrix[2, 2] = 1
    matrix[:2, -1] = [2, 3]
    points = np.random.rand(10, 2)
    expected = points * 2 + np.array([2, 3])
    assert_allclose(Homogeneous(matrix).apply(points), expected)
def test_homogeneous_apply_batched():
e = np.eye(3) * 2
p = np.random.rand(10, 2)
e[2, 2] = 1
e[:2, -1] = [2, 3]
h = Homogeneous(e)
p_applied = h.apply(p, batch_size=2)
p_manual = p * 2 + np.array([2, 3])
assert_allclose(p_applied, p_manual)
def test_homogeneous_as_vector():
e = np.eye(3) * 2
e[2, 2] = 1
h = Homogeneous(e)
assert_allclose(h.as_vector(), e.flatten())
def test_homogeneous_from_vector_inplace():
h = Homogeneous(np.eye(3))
e = np.eye(3) * 2
e[2, 2] = 1
h._from_vector_inplace(e.ravel())
assert_allclose(h.h_matrix, e)
| |
from __future__ import absolute_import, division, print_function
from blaze.compute.sql import compute, computefull, select, lower_column
from blaze.expr import *
import sqlalchemy
import sqlalchemy as sa
from blaze.compatibility import xfail
from blaze.utils import unique
# Symbolic blaze table and the matching SQLAlchemy schema shared by the
# tests below.
t = Symbol('t', 'var * {name: string, amount: int, id: int}')

metadata = sa.MetaData()

s = sa.Table('accounts', metadata,
             sa.Column('name', sa.String),
             sa.Column('amount', sa.Integer),
             sa.Column('id', sa.Integer, primary_key=True),
             )

# Wider table/symbol pair with an extra single-char 'sex' column.
tbig = Symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')

sbig = sa.Table('accountsbig', metadata,
                sa.Column('name', sa.String),
                sa.Column('sex', sa.String),
                sa.Column('amount', sa.Integer),
                sa.Column('id', sa.Integer, primary_key=True),
                )
def normalize(s):
    """Collapse all whitespace runs in *s* to single spaces and lowercase it.

    Lets the tests compare generated SQL strings regardless of layout.
    """
    tokens = s.strip().split()
    return " ".join(tokens).lower()
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
print(compute(t[['name', 'amount']], s))
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s)) == str(s.c.amount == 100)
def test_eq_unicode():
assert str(compute(t['name'] == u'Alice', s)) == str(s.c.name == u'Alice')
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(computefull(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s)) == str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s)) == str(s.c.amount * s.c.id)
assert str(computefull(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
    # Join on a shared column; compare against a hand-built SQLAlchemy join.
    metadata = sa.MetaData()
    lhs = sa.Table('amounts', metadata,
                   sa.Column('name', sa.String),
                   sa.Column('amount', sa.Integer))
    rhs = sa.Table('ids', metadata,
                   sa.Column('name', sa.String),
                   sa.Column('id', sa.Integer))
    expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
    expected = select(list(unique(expected.columns, key=lambda c:
                                  c.name))).select_from(expected)
    L = Symbol('L', 'var * {name: string, amount: int}')
    R = Symbol('R', 'var * {name: string, id: int}')
    joined = join(L, R, 'name')
    result = compute(joined, {L: lhs, R: rhs})
    assert normalize(str(result)) == normalize("""
    SELECT amounts.name, amounts.amount, ids.id
    FROM amounts JOIN ids ON amounts.name = ids.name""")
    assert str(select(result)) == str(select(expected))
    # Schemas match
    assert list(result.c.keys()) == list(joined.fields)


def test_clean_complex_join():
    # Join where one side is pre-filtered; either of two equivalent SQL
    # renderings is accepted.
    metadata = sa.MetaData()
    lhs = sa.Table('amounts', metadata,
                   sa.Column('name', sa.String),
                   sa.Column('amount', sa.Integer))
    rhs = sa.Table('ids', metadata,
                   sa.Column('name', sa.String),
                   sa.Column('id', sa.Integer))
    L = Symbol('L', 'var * {name: string, amount: int}')
    R = Symbol('R', 'var * {name: string, id: int}')
    joined = join(L[L.amount > 0], R, 'name')
    result = compute(joined, {L: lhs, R: rhs})
    assert (normalize(str(result)) == normalize("""
    SELECT amounts.name, amounts.amount, ids.id
    FROM amounts JOIN ids ON amounts.name = ids.name
    WHERE amounts.amount > :amount_1""")
            or
            normalize(str(result)) == normalize("""
    SELECT amounts.name, amounts.amount, ids.id
    FROM amounts, (SELECT amounts.name AS name, amounts.amount AS amount
    FROM amounts
    WHERE amounts.amount > :amount_1) JOIN ids ON amounts.name = ids.name"""))


def test_multi_column_join():
    # Join on two columns; the ON clause must AND both equalities.
    metadata = sa.MetaData()
    lhs = sa.Table('aaa', metadata,
                   sa.Column('x', sa.Integer),
                   sa.Column('y', sa.Integer),
                   sa.Column('z', sa.Integer))
    rhs = sa.Table('bbb', metadata,
                   sa.Column('w', sa.Integer),
                   sa.Column('x', sa.Integer),
                   sa.Column('y', sa.Integer))
    L = Symbol('L', 'var * {x: int, y: int, z: int}')
    R = Symbol('R', 'var * {w: int, x: int, y: int}')
    joined = join(L, R, ['x', 'y'])
    expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
                        & (lhs.c.y == rhs.c.y))
    expected = select(list(unique(expected.columns, key=lambda c:
                                  c.name))).select_from(expected)
    result = compute(joined, {L: lhs, R: rhs})
    assert str(result) == str(expected)
    assert str(select(result)) == str(select(expected))
    # Schemas match
    print(result.c.keys())
    print(joined.fields)
    assert list(result.c.keys()) == list(joined.fields)
def test_unary_op():
    # Unary math functions map onto sa.func equivalents.
    assert str(compute(exp(t['amount']), s)) == str(sa.func.exp(s.c.amount))


def test_unary_op_neg():
    # Fix: this was a second `def test_unary_op`, which shadowed the exp test
    # above so pytest never collected it.  Renamed so both tests run.
    assert str(compute(-t['amount'], s)) == str(-s.c.amount)
def test_reductions():
    # sum/mean/count map onto SQL aggregates; reductions get a derived
    # column name like 'amount_sum'.
    assert str(compute(sum(t['amount']), s)) == \
        str(sa.sql.functions.sum(s.c.amount))
    assert str(compute(mean(t['amount']), s)) == \
        str(sa.sql.func.avg(s.c.amount))
    assert str(compute(count(t['amount']), s)) == \
        str(sa.sql.func.count(s.c.amount))
    assert 'amount_sum' == compute(sum(t['amount']), s).name


def test_count_on_table():
    # Counting a whole table counts its primary-key column.
    assert normalize(str(select(compute(t.count(), s)))) == normalize("""
    SELECT count(accounts.id) as count_1
    FROM accounts""")
    assert normalize(str(select(compute(t[t.amount > 0].count(), s)))) == \
        normalize("""
        SELECT count(accounts.id) as count_1
        FROM accounts
        WHERE accounts.amount > :amount_1""")


def test_distinct():
    # DISTINCT on a single column.
    result = str(compute(Distinct(t['amount']), s))
    assert 'distinct' in result.lower()
    assert 'amount' in result.lower()
    print(result)
    assert result == str(sa.distinct(s.c.amount))


def test_distinct_multiple_columns():
    # Table-level distinct applies to the full column list.
    assert normalize(str(compute(t.distinct(), s))) == normalize("""
    SELECT DISTINCT accounts.name, accounts.amount, accounts.id
    FROM accounts""")


def test_nunique():
    # nunique compiles to COUNT(DISTINCT col); only check the pieces appear.
    result = str(computefull(nunique(t['amount']), s))
    print(result)
    assert 'distinct' in result.lower()
    assert 'count' in result.lower()
    assert 'amount' in result.lower()


@xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
    assert str(compute(any(t['amount'] > 150), s)) == \
        str(sqlalchemy.sql.functions.any(s.c.amount > 150))
def test_by():
    # Simple grouped reduction -> SELECT ... GROUP BY.
    expr = by(t['name'], t['amount'].sum())
    result = compute(expr, s)
    expected = sa.select([s.c.name,
                          sa.sql.functions.sum(s.c.amount).label('amount_sum')]
                         ).group_by(s.c.name)
    assert str(result) == str(expected)


def test_by_head():
    # Grouping a head() expression keeps the LIMIT in the generated SQL.
    t2 = t.head(100)
    expr = by(t2['name'], t2['amount'].sum())
    result = compute(expr, s)
    # s2 = select(s).limit(100)
    # expected = sa.select([s2.c.name,
    #                       sa.sql.functions.sum(s2.c.amount).label('amount_sum')]
    #                      ).group_by(s2.c.name)
    expected = """
    SELECT accounts.name, sum(accounts.amount) as amount_sum
    FROM accounts
    GROUP by accounts.name
    LIMIT :param_1"""
    assert normalize(str(result)) == normalize(str(expected))


def test_by_two():
    # Grouping on two key columns.
    expr = by(tbig[['name', 'sex']], tbig['amount'].sum())
    result = compute(expr, sbig)
    expected = (sa.select([sbig.c.name,
                           sbig.c.sex,
                           sa.sql.functions.sum(sbig.c.amount).label('amount_sum')])
                .group_by(sbig.c.name, sbig.c.sex))
    assert str(result) == str(expected)


def test_by_three():
    # Grouped reduction over a computed (compound) expression.
    result = compute(by(tbig[['name', 'sex']],
                        (tbig['id'] + tbig['amount']).sum()),
                     sbig)
    assert normalize(str(result)) == normalize("""
    SELECT accountsbig.name,
           accountsbig.sex,
           sum(accountsbig.id + accountsbig.amount) AS sum
    FROM accountsbig GROUP BY accountsbig.name, accountsbig.sex
    """)


def test_by_summary_clean():
    # Named min/max aggregates keep their user-supplied labels.
    expr = by(t.name, min=t.amount.min(), max=t.amount.max())
    result = compute(expr, s)
    expected = """
    SELECT accounts.name, max(accounts.amount) AS max, min(accounts.amount) AS min
    FROM accounts
    GROUP BY accounts.name
    """
    assert normalize(str(result)) == normalize(expected)
def test_join_projection():
    # Projecting columns out of a join keeps the join condition.
    metadata = sa.MetaData()
    lhs = sa.Table('amounts', metadata,
                   sa.Column('name', sa.String),
                   sa.Column('amount', sa.Integer))
    rhs = sa.Table('ids', metadata,
                   sa.Column('name', sa.String),
                   sa.Column('id', sa.Integer))
    L = Symbol('L', 'var * {name: string, amount: int}')
    R = Symbol('R', 'var * {name: string, id: int}')
    want = join(L, R, 'name')[['amount', 'id']]
    result = compute(want, {L: lhs, R: rhs})
    print(result)
    assert 'join' in str(result).lower()
    assert result.c.keys() == ['amount', 'id']
    assert 'amounts.name = ids.name' in str(result)


def test_sort():
    # sort() -> ORDER BY; descending uses sqlalchemy.desc.
    assert str(compute(t.sort('amount'), s)) == \
        str(select(s).order_by(s.c.amount))
    assert str(compute(t.sort('amount', ascending=False), s)) == \
        str(select(s).order_by(sqlalchemy.desc(s.c.amount)))


def test_head():
    # head(n) -> LIMIT n.
    assert str(compute(t.head(2), s)) == str(select(s).limit(2))


def test_label():
    # Explicit label on a computed column.
    assert str(compute((t['amount'] * 10).label('foo'), s)) == \
        str((s.c.amount * 10).label('foo'))


def test_relabel():
    # relabel renames the given columns, leaving the rest untouched.
    result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), s)
    expected = select([s.c.name.label('NAME'), s.c.amount, s.c.id.label('ID')])
    assert str(result) == str(expected)


def test_merge():
    # merge combines raw columns with labeled computed columns.
    col = (t['amount'] * 2).label('new')
    expr = merge(t['name'], col)
    result = str(compute(expr, s))
    assert 'amount * ' in result
    assert 'FROM accounts' in result
    assert 'SELECT accounts.name' in result
    assert 'new' in result


def test_projection_of_selection():
    # Projecting after a selection must shrink the generated SQL.
    print(compute(t[t['amount'] < 0][['name', 'amount']], s))
    assert len(str(compute(t[t['amount'] < 0], s))) > \
        len(str(compute(t[t['amount'] < 0][['name', 'amount']], s)))
def test_union():
    # union of several symbols -> SQL UNION of the backing tables.
    metadata = sa.MetaData()
    ts = [Symbol('t_%d' % i, 'var * {name: string, amount: int, id: int}')
          for i in [1, 2, 3]]
    ss = [sa.Table('accounts_%d' % i, metadata,
                   sa.Column('name', sa.String),
                   sa.Column('amount', sa.Integer),
                   sa.Column('id', sa.Integer, primary_key=True)) for i in [1, 2, 3]]
    expr = union(*ts)
    result = str(select(compute(expr, dict(zip(ts, ss)))))
    assert "SELECT name, amount, id" in str(result)
    assert "accounts_1 UNION accounts_2 UNION accounts_3" in str(result)


def test_outer_join():
    # Exercise inner/left/right joins end-to-end on an in-memory SQLite DB.
    L = Symbol('L', 'var * {id: int, name: string, amount: real}')
    R = Symbol('R', 'var * {city: string, id: int}')
    from blaze.sql import SQL
    engine = sa.create_engine('sqlite:///:memory:')
    _left = [(1, 'Alice', 100),
             (2, 'Bob', 200),
             (4, 'Dennis', 400)]
    left = SQL(engine, 'left', schema=L.schema)
    left.extend(_left)
    _right = [('NYC', 1),
              ('Boston', 1),
              ('LA', 3),
              ('Moscow', 4)]
    right = SQL(engine, 'right', schema=R.schema)
    right.extend(_right)
    conn = engine.connect()
    query = compute(join(L, R, how='inner'), {L: left.table, R: right.table})
    result = list(map(tuple, conn.execute(query).fetchall()))
    assert set(result) == set(
        [(1, 'Alice', 100, 'NYC'),
         (1, 'Alice', 100, 'Boston'),
         (4, 'Dennis', 400, 'Moscow')])
    query = compute(join(L, R, how='left'), {L: left.table, R: right.table})
    result = list(map(tuple, conn.execute(query).fetchall()))
    assert set(result) == set(
        [(1, 'Alice', 100, 'NYC'),
         (1, 'Alice', 100, 'Boston'),
         (2, 'Bob', 200, None),
         (4, 'Dennis', 400, 'Moscow')])
    query = compute(join(L, R, how='right'), {L: left.table, R: right.table})
    print(query)
    result = list(map(tuple, conn.execute(query).fetchall()))
    print(result)
    assert set(result) == set(
        [(1, 'Alice', 100, 'NYC'),
         (1, 'Alice', 100, 'Boston'),
         (3, None, None, 'LA'),
         (4, 'Dennis', 400, 'Moscow')])
    # SQLAlchemy doesn't support full outer join
    """
    query = compute(join(L, R, how='outer'), {L: left.table, R: right.table})
    result = list(map(tuple, conn.execute(query).fetchall()))
    assert set(result) == set(
        [(1, 'Alice', 100, 'NYC'),
         (1, 'Alice', 100, 'Boston'),
         (2, 'Bob', 200, None),
         (3, None, None, 'LA'),
         (4, 'Dennis', 400, 'Moscow')])
    """
    conn.close()
def test_summary():
    # summary() with named aggregates labels each output column.
    expr = summary(a=t.amount.sum(), b=t.id.count())
    result = str(compute(expr, s))
    assert 'sum(accounts.amount) as a' in result.lower()
    assert 'count(accounts.id) as b' in result.lower()


def test_summary_clean():
    # summary over a filtered table keeps the WHERE clause.
    t2 = t[t.amount > 0]
    expr = summary(a=t2.amount.sum(), b=t2.id.count())
    result = str(compute(expr, s))
    assert normalize(result) == normalize("""
    SELECT sum(accounts.amount) as a, count(accounts.id) as b
    FROM accounts
    WHERE accounts.amount > :amount_1""")


def test_summary_by():
    # by() over a summary produces labeled aggregates plus GROUP BY.
    expr = by(t.name, summary(a=t.amount.sum(), b=t.id.count()))
    result = str(compute(expr, s))
    assert 'sum(accounts.amount) as a' in result.lower()
    assert 'count(accounts.id) as b' in result.lower()
    assert 'group by accounts.name' in result.lower()


def test_clean_join():
    # Chained two- and three-table joins stay flat (no nested subqueries).
    metadata = sa.MetaData()
    name = sa.Table('name', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('name', sa.String),
                    )
    city = sa.Table('place', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('city', sa.String),
                    sa.Column('country', sa.String),
                    )
    friends = sa.Table('friends', metadata,
                       sa.Column('a', sa.Integer),
                       sa.Column('b', sa.Integer),
                       )
    tcity = Symbol('city', discover(city))
    tfriends = Symbol('friends', discover(friends))
    tname = Symbol('name', discover(name))
    ns = {tname: name, tfriends: friends, tcity: city}
    expr = join(tfriends, tname, 'a', 'id')
    assert normalize(str(compute(expr, ns))) == normalize("""
    SELECT friends.a, friends.b, name.name
    FROM friends JOIN name on friends.a = name.id""")
    expr = join(join(tfriends, tname, 'a', 'id'), tcity, 'a', 'id')
    assert normalize(str(compute(expr, ns))) == normalize("""
    SELECT friends.a, friends.b, name.name, place.city, place.country
    FROM friends
    JOIN name ON friends.a = name.id
    JOIN place ON friends.a = place.id
    """)


def test_like():
    # like() with a glob pattern compiles to SQL LIKE.
    expr = t.like(name='Alice*')
    assert normalize(str(compute(expr, s))) == normalize("""
    SELECT accounts.name, accounts.amount, accounts.id
    FROM accounts
    WHERE accounts.name LIKE :name_1""")
def test_columnwise_on_complex_selection():
    # Arithmetic over a filtered column keeps the filter in WHERE.
    assert normalize(str(select(compute(t[t.amount > 0].amount + 1, s)))) == \
        normalize("""
        SELECT accounts.amount + :amount_1 AS anon_1
        FROM accounts
        WHERE accounts.amount > :amount_2
        """)


def test_reductions_on_complex_selections():
    # Reduction over a filtered column keeps the filter in WHERE.
    assert normalize(str(select(compute(t[t.amount > 0].id.sum(), s)))) == \
        normalize("""
        SELECT sum(accounts.id) as id_sum
        FROM accounts
        WHERE accounts.amount > :amount_1 """)


def test_clean_summary_by_where():
    # by() over a filtered table: WHERE before GROUP BY, no subquery.
    t2 = t[t.id == 1]
    expr = by(t2.name, sum=t2.amount.sum(), count=t2.amount.count())
    result = compute(expr, s)
    assert normalize(str(result)) == normalize("""
    SELECT accounts.name, count(accounts.amount) AS count, sum(accounts.amount) AS sum
    FROM accounts
    WHERE accounts.id = :id_1
    GROUP BY accounts.name
    """)


def test_by_on_count():
    # Grouped whole-table count counts the primary-key column.
    expr = by(t.name, count=t.count())
    result = compute(expr, s)
    assert normalize(str(result)) == normalize("""
    SELECT accounts.name, count(accounts.id) AS count
    FROM accounts
    GROUP BY accounts.name
    """)
def test_join_complex_clean():
    # Join where the left side is pre-filtered on its join key.
    metadata = sa.MetaData()
    name = sa.Table('name', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('name', sa.String),
                    )
    city = sa.Table('place', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('city', sa.String),
                    sa.Column('country', sa.String),
                    )
    sel = select(name).where(name.c.id > 10)  # NOTE(review): unused locally
    tname = Symbol('name', discover(name))
    tcity = Symbol('city', discover(city))
    ns = {tname: name, tcity: city}
    expr = join(tname[tname.id > 0], tcity, 'id')
    result = compute(expr, ns)
    assert normalize(str(result)) == normalize("""
    SELECT name.id, name.name, place.city, place.country
    FROM name JOIN place ON name.id = place.id
    WHERE name.id > :id_1""")


def test_projection_of_join():
    # Projection after a join whose right-hand side is filtered.
    metadata = sa.MetaData()
    name = sa.Table('name', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('name', sa.String),
                    )
    city = sa.Table('place', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('city', sa.String),
                    sa.Column('country', sa.String),
                    )
    tname = Symbol('name', discover(name))
    tcity = Symbol('city', discover(city))
    expr = join(tname, tcity[tcity.city == 'NYC'], 'id')[['country', 'name']]
    ns = {tname: name, tcity: city}
    assert normalize(str(compute(expr, ns))) == normalize("""
    SELECT place.country, name.name
    FROM name JOIN place ON name.id = place.id
    WHERE place.city = :city_1""")
def test_lower_column():
    # lower_column resolves a column of a select/join back to its base table.
    metadata = sa.MetaData()
    name = sa.Table('name', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('name', sa.String),
                    )
    city = sa.Table('place', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('city', sa.String),
                    sa.Column('country', sa.String),
                    )
    tname = Symbol('name', discover(name))
    tcity = Symbol('city', discover(city))
    ns = {tname: name, tcity: city}
    assert lower_column(name.c.id) is name.c.id
    assert lower_column(select(name).c.id) is name.c.id
    j = name.join(city, name.c.id == city.c.id)
    col = [c for c in j.columns if c.name == 'country'][0]
    assert lower_column(col) is city.c.country


def test_selection_of_join():
    # Selecting a single column after filtering a join.
    metadata = sa.MetaData()
    name = sa.Table('name', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('name', sa.String),
                    )
    city = sa.Table('place', metadata,
                    sa.Column('id', sa.Integer),
                    sa.Column('city', sa.String),
                    sa.Column('country', sa.String),
                    )
    tname = Symbol('name', discover(name))
    tcity = Symbol('city', discover(city))
    ns = {tname: name, tcity: city}
    j = join(tname, tcity, 'id')
    expr = j[j.city == 'NYC'].name
    result = compute(expr, ns)
    assert normalize(str(result)) == normalize("""
    SELECT name.name
    FROM name JOIN place ON name.id = place.id
    WHERE place.city = :city_1""")
| |
#!/usr/bin/env python3
import sys

# Fail fast on unsupported interpreters.  Fix: the original tested
# `sys.version_info[0] < 3`, which contradicted the "3.4 or later" message
# (Python 3.0-3.3 slipped through); compare against the full version tuple.
if sys.version_info < (3, 4):
    raise ImportError('This module only supports Python 3.4 or later. Try use `python3`')
from enum import Enum, IntEnum
import serial
import time
class Bad_Command_Parameter_or_Timing(Exception):
    '''Raised when the controller rejects a command ('NG' response).

    Covers wrong commands, wrong parameters, and bad timing -- it often
    occurs when drive commands are sent to a BUSY controller.
    '''
class Not_Supported(Exception):
    '''Raised when a command is not supported by the connected controller.'''
class Undefined_Controller(Exception):
    '''Raised when the controller model is not defined or not implemented.'''
class Undefined_Stage(Exception):
    '''Raised when the stage model is not defined or not implemented.'''
class Controllers(Enum):
    '''Supported stage controller models.

    Attributes
    ----------
    SHOT_302GS
    SHOT_304GS
    SHOT_702

    TODO
    ----
    Other controllers such as OSM and HIT series.
    '''
    SHOT_302GS = 1
    SHOT_304GS = 2
    SHOT_702 = 3
    Undefined = 99
class Controller_Modes(Enum):
    '''Instruction set spoken by a controller.

    Attributes
    ----------
    SHOT
        SHOT instruction set.
    SHOT_enhanced
        Enhanced SHOT instruction set.
    HIT
        HIT instruction set.
    '''
    SHOT = 0
    SHOT_enhanced = 1
    HIT = 2
class Comm_ack(Enum):
    '''COMM-ACK status of the controller.

    Attributes
    ----------
    MAIN
    SUB
    '''
    MAIN = 0
    SUB = 1
class Excitation(IntEnum):
    '''Excitation mode of the stepping motor.

    Attributes
    ----------
    Free
    Hold
    '''
    Free = 0
    Hold = 1
class Stages(IntEnum):
    ''' Stage type.

    The numeric ranges encode the stage category (see is_linear_stage(),
    is_rotation_stage(), is_gonio_stage(), which test strict membership in
    the ranges delimited by the *_stage / *_stage_end sentinels):

    * Linear_stage .. Linear_stage_end      linear translation stages
    * Rotation_stage .. Rotation_stage_end  rotation stages
    * Gonio_stage .. Gonio_stage_end        gonio stages

    Attributes
    ----------
    SGSP**_**
        SGSP series stages. e.g. Both SGSP46-500(X) and SGSP46-500(Z) are same type: SGSP46_500.
    SGSP_**YAW
    SGSP_**A**
    OSMS**-**
        Compatible stages to SGSP series.  OSMS members reuse the value of
        the corresponding SGSP member, so each is an alias of that entry.
    HST_**
    HST_**YAW
    HPS**_**
    TAMM**_**
    '''
    #
    # Linear translation stages
    #
    Linear_stage = 0  # sentinel: exclusive lower bound of the linear range
    # SGSP series
    SGSP15_10 = 151
    SGSP20_20 = 202
    SGSP20_35 = 203
    SGSP20_85 = 208
    SGSP26_50 = 265
    SGSP26_100 = 2610
    SGSP26_150 = 2615
    SGSP26_200 = 2620
    SGSP33_50 = 335
    SGSP33_100 = 3310
    SGSP33_200 = 3320
    SGSP46_300 = 4630
    SGSP46_400 = 4640
    SGSP46_500 = 4650
    SGSP46_800 = 4680
    SGSP65_1200 = 6512
    SGSP65_1500 = 6515
    # OSMS series are compatible to SGSP series (aliases by equal value).
    OSMS15_10 = 151
    OSMS20_20 = 202
    OSMS20_35 = 203
    OSMS20_85 = 208
    OSMS26_50 = 265
    OSMS26_100 = 2610
    OSMS26_150 = 2615
    OSMS26_200 = 2620
    OSMS33_50 = 335
    OSMS33_100 = 3310
    OSMS33_200 = 3320
    OSMS46_300 = 4630
    OSMS46_400 = 4640
    OSMS46_500 = 4650
    OSMS46_800 = 4680
    OSMS65_1200 = 6512
    OSMS65_1500 = 6515
    # HST series
    HST_50 = 5
    HST_100 = 10
    HST_200 = 20
    # HPS series
    HPS60_20 = 602
    HPS80_50 = 805
    HPS120_60 = 1206
    # TAMM series
    TAMM40_10 = 401
    TAMM60_15 = 601
    TAMM100_50 = 1005
    TAMM100_100 = 1001
    Linear_stage_end = 9999  # sentinel: exclusive upper bound of the linear range
    #
    # Rotation stages
    #
    Rotation_stage = 10000  # sentinel: exclusive lower bound of the rotation range
    # SGSP series
    SGSP_40YAW = 10040
    SGSP_60YAW = 10060
    SGSP_80YAW = 10080
    SGSP_120YAW = 10120
    SGSP_160YAW = 10160
    # HST series
    HST_120YAW = 11120
    HST_160YAW = 11160
    # TODO: HDS series
    Rotation_stage_end = 19999  # sentinel: exclusive upper bound of the rotation range
    #
    # Gonio stage
    #
    Gonio_stage = 20000  # sentinel: exclusive lower bound of the gonio range
    # SGSP series
    SGSP_60A75 = 26075
    SGSP_60A100 = 26010
    SGSP_60A130 = 26030
    Gonio_stage_end = 29999  # sentinel: exclusive upper bound of the gonio range
def __get_baserate(stype):
    ''' Resolution (full step) of each stage, per single pulse.

    Parameters
    ----------
    stype : Stages
        Stage type.

    Returns
    -------
    int
        For translation stages, 1 means 1 micro-meter per full pulse;
        for rotation and gonio stages, 1 means 0.001 degree per full pulse.

    Raises
    ------
    Undefined_Stage
        If no resolution is known for *stype*.
    '''
    # Replaces the original 30-branch `if ... is ...` chain with one table
    # lookup.  OSMS members are value-aliases of the SGSP members, so they
    # resolve to the same dictionary entry; stages absent from the table
    # raise Undefined_Stage exactly as before.
    baserates = {
        # Translation stages (micro-meters per full pulse).
        Stages.SGSP15_10: 2,
        Stages.SGSP20_20: 2,
        Stages.SGSP20_35: 2,
        Stages.SGSP20_85: 2,
        Stages.SGSP26_50: 4,
        Stages.SGSP26_100: 4,
        Stages.SGSP26_150: 4,
        Stages.SGSP26_200: 4,
        Stages.SGSP33_50: 12,
        Stages.SGSP33_100: 12,
        Stages.SGSP33_200: 12,
        Stages.SGSP46_300: 20,
        Stages.SGSP46_400: 20,
        Stages.SGSP46_500: 20,
        Stages.SGSP46_800: 20,
        Stages.SGSP65_1200: 50,
        Stages.SGSP65_1500: 50,
        Stages.HST_50: 4,
        Stages.HST_100: 4,
        Stages.HST_200: 4,
        Stages.HPS60_20: 2,   # TODO: and other HPS series
        Stages.TAMM40_10: 2,  # TODO: and other TAMM series
        # Rotation stages (milli-degrees per full pulse).
        Stages.SGSP_40YAW: 5,
        Stages.SGSP_60YAW: 5,
        Stages.SGSP_80YAW: 5,
        Stages.SGSP_120YAW: 5,
        Stages.SGSP_160YAW: 5,
        Stages.HST_120YAW: 5,  # TODO: and other HST series
        # Gonio stages (milli-degrees per full pulse).
        Stages.SGSP_60A75: 2,
        Stages.SGSP_60A100: 1,
        Stages.SGSP_60A130: 1,
    }
    try:
        return baserates[stype]
    except KeyError:
        raise Undefined_Stage(stype.name + ' is not defined as a valid stage, or just not implemented.')
def is_linear_stage(stype):
    ''' Tell whether *stype* denotes a linear translation stage.

    Parameters
    ----------
    stype : Stages
        Stage type.

    Returns
    -------
    res : bool
        True when the value lies strictly inside the linear-stage range.
    '''
    return int(Stages.Linear_stage) < int(stype) < int(Stages.Linear_stage_end)
def is_rotation_stage(stype):
    ''' Tell whether *stype* denotes a rotation stage.

    Parameters
    ----------
    stype : Stages
        Stage type.

    Returns
    -------
    res : bool
        True when the value lies strictly inside the rotation-stage range.
    '''
    return int(Stages.Rotation_stage) < int(stype) < int(Stages.Rotation_stage_end)
def is_gonio_stage(stype):
    ''' Tell whether *stype* denotes a gonio stage.

    Parameters
    ----------
    stype : Stages
        Stage type.

    Returns
    -------
    res : bool
        True when the value lies strictly inside the gonio-stage range.
    '''
    return int(Stages.Gonio_stage) < int(stype) < int(Stages.Gonio_stage_end)
def get_value_per_pulse(stype):
    ''' Translation step per single (full) pulse.

    Parameters
    ----------
    stype : Stages
        Stage type.

    Returns
    -------
    val : int
        Resolution per full pulse (1 division).  Micro-meters for a
        translation stage; milli-degrees for a rotation or gonio stage.
    '''
    rate = __get_baserate(stype)
    return rate
#def get_micro_meter_per_pulse(stype):
# if not is_linear_stage(stype):
# raise Undefined_Stage(stype.name + ' is not a linear stage.')
# return __get_baserate(stype)
#
#def get_milli_degree_per_pulse(stype):
# if not (is_rotation_stage(stype) or is_gonio_stage(stype)):
# raise Undefined_Stage(stype.name + ' is not a rotation stage.')
# return __get_baserate(stype)
class Controller:
    '''Parameters and capability flags for a stage controller.

    Parameters
    ----------
    ctype : Controllers
        Type of the controller.

    Raises
    ------
    Undefined_Controller
        If *ctype* is not a known or implemented controller.

    Attributes
    ----------
    ctype : Controllers
        Type of a controller.
    baudrate : int
        Baudrate of RS232C communication.
    delimiter : bytes
        Delimiter of RS232C communication.
    comm_ack : Comm_ack
        COMM/ACK mode.
    '''

    def __init__(self, ctype):
        self.ctype = ctype
        # Serial framing shared by all supported models.
        self.parity = 'N'
        self.databit = 8
        self.stopbit = 1
        self.rtscts = True
        self.read_timeout = 1
        self.write_timeout = 1
        self.delimiter = b'\r\n'
        self.comm_ack = Comm_ack.MAIN
        # Load default values per model.
        if self.ctype is Controllers.SHOT_302GS or self.ctype is Controllers.SHOT_304GS:
            self.baudrate = 9600
            self.cmode = Controller_Modes.SHOT
        elif ctype is Controllers.SHOT_702:
            self.baudrate = 38400
            self.cmode = Controller_Modes.SHOT
        else:
            raise Undefined_Controller('Controller is undefined.')

    def __is_30X(self):
        # SHOT-302GS / SHOT-304GS family.
        return self.ctype is Controllers.SHOT_302GS or self.ctype is Controllers.SHOT_304GS

    def __is_70X(self):
        # SHOT-702 family.
        return self.ctype is Controllers.SHOT_702

    def is_SHOT(self):
        '''Whether the controller speaks the SHOT (plain or enhanced) instruction set.'''
        return (self.cmode is Controller_Modes.SHOT
                or self.cmode is Controller_Modes.SHOT_enhanced)

    def is_HIT(self):
        '''Whether the controller speaks the HIT instruction set.'''
        return self.cmode is Controller_Modes.HIT

    def get_support_baudrates(self):
        ''' Get the tuple of supported baudrates of this controller.

        Returns
        -------
        baudrates : tuple
            A list of supported baudrates.
        '''
        if self.__is_30X():
            return (4800, 9600, 19200, 38400)
        if self.__is_70X():
            return (38400, )
        return ()

    def get_support_divisions(self):
        ''' Get the tuple of supported divisions of this controller.

        Returns
        -------
        divisions : tuple
            A list of supported division values.
        '''
        return (1, 2, 4, 5, 8, 10, 20, 25, 40, 50, 80, 100, 125, 200, 250)

    # Backward-compatible alias: the method was originally published under
    # this misspelled name, so existing callers keep working.
    get_support_devisions = get_support_divisions

    def get_support_axes(self):
        ''' Get the maximum number of controllable stages.

        This method does not check the value of AXIS memory switch nor how many stages are connected.

        Returns
        -------
        num : int
            The number of controllable stages.
        '''
        if self.ctype == Controllers.SHOT_304GS:
            return 4
        return 2

    def get_support_speed_ranges(self):
        '''Get the range of speed.

        Returns
        -------
        ((S_min, S_max), (F_min, F_max), (R_min, R_max))
        '''
        return ((1, 500000), (1, 500000), (0, 1000))

    # ------------------------------------------------------------------
    # Command-support predicates.  is_support_X() reports whether command
    # "X" is available on this controller model; universally available
    # commands return a constant True, the rest depend on the model family.
    # ------------------------------------------------------------------

    # Moving operations
    def is_support_H(self):
        return True

    def is_support_M(self):
        return True

    def is_support_A(self):
        ''' Check if the controller supports "A" command.

        Returns
        -------
        supports : bool
            Availability.

        Notes
        -----
        Other functions such as is_support_[Com] are same format.
        '''
        return True

    def is_support_E(self):
        return self.__is_30X()

    def is_support_K(self):
        return self.__is_30X()

    def is_support_J(self):
        return True

    def is_support_G(self):
        return True

    def is_support_R(self):
        return True

    def is_support_L(self):
        return True

    def is_support_D(self):
        return True

    def is_support_V(self):
        return not self.__is_30X()

    def is_support_U(self):
        return self.__is_30X()

    def is_support_W(self):
        return self.__is_30X()

    def is_support_T(self):
        return self.__is_30X()

    def is_support_C(self):
        return True

    def is_support_S(self):
        return True

    # Status operations
    def is_support_Q(self):
        return True

    def is_support_Ex(self):
        return True

    def is_support_Qu(self):
        return True

    def is_support_QuV(self):
        return True

    def is_support_QuP(self):
        return True

    def is_support_QuS(self):
        return True

    def is_support_QuD(self):
        return True

    def is_support_QuB(self):
        return not self.__is_30X()

    def is_support_QuM(self):
        return self.__is_30X()

    def is_support_QuA(self):
        return self.__is_30X()

    def is_support_QuO(self):
        return self.__is_30X()

    def is_support_QuW(self):
        return self.__is_30X()

    def is_support_QuK(self):
        return self.__is_30X()

    def is_support_QuE(self):
        return self.__is_30X()

    # I/O and others
    def is_support_O(self):
        return True

    def is_support_I(self):
        return True

    def is_support_P(self):
        return self.__is_30X()
class Session:
'''Session of controlling OptoSigma's stage.
Parameters
----------
ctype : Controllers
Controller type.
verbose_level : int, optional
More messages are output as higher value is set.
wait_time : int, optional
Polling time while waiting busy status.
Attributes
----------
controller : Controller
Parameters of the controller.
stages : array of Stages
Parameters of connected stages.
'''
def __init__(self, ctype, verbose_level=0, wait_time=2.):
    self.controller = Controller(ctype)
    # Negative slice length used to strip the trailing delimiter bytes from
    # lines read back off the serial port (see __send).
    self.__d = -1 * len(self.controller.delimiter)
    self.stages = []     # per-axis stage types, registered via append_stage()
    self.divisions = []  # per-axis division settings, loaded lazily
    self.verbose_level = verbose_level
    self.wait_time = wait_time
    self.connected = False
    self.divisions_loaded = False
def append_stage(self, stype):
    ''' Register one more axis by its stage type.

    Setting stage parameters is only required when amounts are given in
    physical units; pure in_pulse operation works without them.

    Parameters
    ----------
    stype : Stages
        The stage type of a new axis stage.

    Raises
    ------
    ValueError
        If *stype* is not a member of the Stages enum.
    '''
    if isinstance(stype, Stages):
        self.stages.append(stype)
    else:
        raise ValueError('Must be one of Stages Enum.')
def connect(self, portname='/dev/ttyUSB0'):
    ''' Connect to the controller via RS232C.

    Opens the serial port with the framing, flow control, and timeouts
    dictated by the controller model, then marks the session connected.

    Parameters
    ----------
    portname : str, optional
        Port name of the RS232C where the controller is connected.
    '''
    # NOTE(review): `writeTimeout` is the legacy pyserial spelling of
    # `write_timeout` -- confirm against the pyserial version in use.
    self.port = serial.Serial(port=portname,
                              baudrate=self.controller.baudrate,
                              bytesize=self.controller.databit,
                              parity=self.controller.parity,
                              stopbits=self.controller.stopbit,
                              timeout=self.controller.read_timeout,
                              writeTimeout=self.controller.write_timeout,
                              rtscts=self.controller.rtscts)
    self.connected = True
def __print(self, msg, level=1):
    # Emit *msg* only when the session verbosity is at least *level*.
    if self.verbose_level >= level:
        print(msg)
def __send(self, command, no_response=True):
    # Low-level command transport: write *command* (ASCII + delimiter) to
    # the serial port.  A response line is read back when the controller is
    # in MAIN comm-ack mode or when the caller expects one
    # (no_response=False); an 'NG' response becomes an exception.  In SUB
    # mode with no expected response, nothing is read and None is returned.
    if not self.connected:
        raise Bad_Command_Parameter_or_Timing('Not connected to the controller yet.')
    self.port.write(command.encode('ascii') + self.controller.delimiter)
    if self.controller.comm_ack is Comm_ack.MAIN or not no_response:
        # self.__d strips the trailing delimiter bytes from the raw line.
        response = (self.port.readline()[:self.__d]).decode('utf-8')
        self.__print(command + ' >> ' + response, level=2)
        if response == 'NG':
            raise Bad_Command_Parameter_or_Timing(command + ' failed.')
        return response
    self.__print('[SUB] ' + command, level=2)
def is_busy(self, stage=None, func=lambda x: x):
    ''' Whether the controller is under operation or not.

    If the controller is busy, only stop and status retrieve commands are accepted and other commands will fail.

    Parameters
    ----------
    stage : int, tuple, list, or None, optional
        If the controller is HIT instruction set, busy status of each slave can be obtained.
        In this case, specify the slave to get status. If None, the first slave is obtained.
    func : callable
        In HIT mode and multiple stages are specified, busy statuses can be summarized.
        Set all or any for reduce results. Otherwise, each status is returned by tuple.

    Returns
    -------
    busy : bool
        The controller is busy or not.

    See also
    --------
    is_ready()
    '''
    if self.controller.is_SHOT():
        return self.__is_busy_shot()
    # Fix: was `raise NotImplemented()`, which itself raised TypeError
    # (NotImplemented is not callable); NotImplementedError is intended.
    raise NotImplementedError()  # TODO: HIT mode.
def __is_busy_shot(self):
    # Prefer the dedicated '!:' busy query when available; otherwise fall
    # back to the full status command and inspect its busy/ready field.
    if self.controller.is_support_Ex():
        return self.__send('!:', no_response=False).startswith('B')
    if not self.controller.is_support_Q():
        raise Not_Supported()
    return self.get_status()[3].startswith('B')
def is_ready(self, stage=None, func=lambda x: x):
    ''' Whether the controller is ready for operation or not.

    If the controller is ready, all commands are acceptable, otherwise limited.

    Parameters
    ----------
    stage : int, tuple, list, or None, optional
        If the controller is HIT instruction set, busy status of each slave can be obtained.
        In this case, specify the slave to get status. If None, the first slave is obtained.
    func : callable
        In HIT mode and multiple stages are specified, busy statuses can be summarized.
        Set all or any for reduce results. Otherwise, each status is returned by tuple.

    Returns
    -------
    busy : bool
        The controller is ready or not.

    See also
    --------
    is_busy()
    '''
    if self.controller.is_SHOT():
        return self.__is_ready_shot()
    # Fix: was `raise NotImplemented()`, which itself raised TypeError
    # (NotImplemented is not callable); NotImplementedError is intended.
    raise NotImplementedError()  # TODO: HIT mode.
def __is_ready_shot(self):
    # Prefer the dedicated '!:' query when available; otherwise fall back
    # to the full status command and inspect its busy/ready field.
    if self.controller.is_support_Ex():
        return self.__send('!:', no_response=False).startswith('R')
    if not self.controller.is_support_Q():
        raise Not_Supported()
    return self.get_status()[3].startswith('R')
def __wait_for_ready(self, stage=None, func=lambda x: x):
    # Poll the busy flag until the controller reports ready, sleeping
    # self.wait_time seconds between polls.
    self.__print('Waiting.')
    while True:
        if not self.is_busy(stage=stage, func=func):
            break
        self.__print(' sleep ' + str(self.wait_time), level=2)
        time.sleep(self.wait_time)
def __load_divisions(self):
    # Populate self.divisions from the controller (dispatch on mode).
    if self.controller.is_SHOT():
        self.__load_divisions_shot()
    else:
        # Fix: was `raise NotImplemented()`, which itself raised TypeError
        # (NotImplemented is not callable); NotImplementedError is intended.
        raise NotImplementedError()  # TODO: HIT mode.
def __load_divisions_shot(self):
    # Query the per-axis division (micro-step) settings via '?:SW' and
    # cache them on the session.
    self.__print('Load division settings.')
    if not (self.controller.is_support_Qu() and self.controller.is_support_QuS()):
        raise Not_Supported()
    raw = self.__send('?:SW', no_response=False)
    self.divisions = [int(field) for field in raw.split(',')]
    self.divisions_loaded = True
def __direction(self, pulse):
    # Sign character used by SHOT positioning commands.
    return '+' if pulse >= 0 else '-'
def reset(self, stage=1, all_stages=False, mechanical=False, wait_for_finish=False):
    ''' Reset or initialize the position of stages.

    Parameters
    ----------
    stage : int, optional
        Target stage number. If all_stage is set to True, this value is ignored.
    all_stages : bool, optional
        If True, all stages are reset simultaneously. In case mechanical is False, all stage parameters must be set.
    mechanical : bool, optional
        If True, stages will reset to the mechanical origin, otherwise, to the electrical zero point.
    wait_for_finish : bool, optional
        If True, check status and wait for operation finish.

    See also
    --------
    initialize(), move()
    '''
    if self.controller.is_SHOT():
        self.__reset_shot(stage, all_stages, mechanical, wait_for_finish)
    elif self.controller.is_HIT():
        # Fix: was `raise NotImplemented()`, which itself raised TypeError
        # (NotImplemented is not callable); NotImplementedError is intended.
        raise NotImplementedError()  # TODO: HIT-mode.
def __reset_shot(self, stage, all_stages, mechanical, wait_for_finish):
    # SHOT implementation of reset(): 'H:' drives to the mechanical origin,
    # 'A:...+P0' + 'G:' moves to the electrical zero point.
    if mechanical:
        if not self.controller.is_support_H():
            raise Not_Supported()
        if all_stages:
            self.__print('Mechanical reset, all stages.')
            self.__send('H:W')
        else:
            self.__print('Mechanical reset, #' + str(stage))
            self.__send('H:' + str(stage))
    else:
        if not (self.controller.is_support_A() and self.controller.is_support_G()):
            raise Not_Supported()
        if all_stages:
            self.__print('Electrical reset, ' + str(len(self.stages)) + ' stages')
            com = ['A:W']
            for _ in self.stages:
                # Fix: the original did com.append(['+P0']) -- appending the
                # *list* made ''.join(com) raise TypeError.  Append the
                # string itself, one '+P0' per registered axis.
                com.append('+P0')
            self.__send(''.join(com))
            self.__send('G:')
        else:
            self.__print('Electrical reset, #' + str(stage))
            self.__send('A:' + str(stage) + '+P0')
            self.__send('G:')
    if wait_for_finish:
        self.__wait_for_ready()
def initialize(self):
    ''' Drive every stage to its mechanical origin and wait for completion.

    Shorthand for reset(all_stages=True, mechanical=True, wait_for_finish=True).

    See also
    --------
    reset()
    '''
    self.reset(all_stages=True, mechanical=True, wait_for_finish=True)
def move(self, stage=1, amount=0, in_pulse=False, absolute=False, wait_for_finish=True):
    '''Move stages.

    Parameters
    ----------
    stage : int
        Target operation stage number. When amount is tuple or list, this value is ignored.
    amount : int or (tuple or list) of int
        Amount of the transition. When a iterable values are specified, corresponding stages are moved simultaneously.
    in_pulse : bool, optional
        If False, the unit of amount is micro-meters/milli-degrees, otherwise, the unit is pulses.
    absolute : bool, optional
        Represents specified values are absolute position or relative travel.
    wait_for_finish : bool, optional
        If True, check status and wait for operation finish.

    See also
    --------
    reset(), jog(), stop(), abort()
    '''
    if self.controller.is_SHOT():
        self.__move_shot(stage, amount, in_pulse, absolute, wait_for_finish)
    else:
        # Fix: was `raise NotImplemented()`, which itself raised TypeError
        # (NotImplemented is not callable); NotImplementedError is intended.
        raise NotImplementedError()  # TODO: HIT-mode.
    def __move_shot(self, stage, amount, in_pulse, absolute, wait_for_finish):
        '''Build and send a SHOT move command (A: absolute or M: relative),
        then G: to start, optionally waiting until the controller is ready.

        When in_pulse is False, amounts are converted from
        micro-meters/milli-degrees to pulses using the per-stage division
        and base value-per-pulse.
        '''
        if not self.controller.is_support_G():
            raise Not_Supported()
        msg = ['Move']
        if absolute:
            if not self.controller.is_support_A():
                raise Not_Supported()
            com = ['A:']
            msg.append('absolutely,')
        else:
            if not self.controller.is_support_M():
                raise Not_Supported()
            com = ['M:']
            msg.append('relatively,')
        if isinstance(amount, (list, tuple)):
            # Multi-stage move: 'W' addresses all stages at once.
            com.append('W')
            msg.append(str(len(amount))+' stages,')
            msg.extend([str(m) for m in amount])
            if not in_pulse:
                msg.append('micro-meters/degrees.')
                if not self.divisions_loaded:
                    self.__load_divisions()
                # physical value -> pulses: value * division / value_per_pulse
                amount = [v * d / get_value_per_pulse(p) for p, v, d in zip(self.stages, amount, self.divisions)]
            # Each stage gets a signed direction flag plus unsigned pulse count.
            com.extend([self.__direction(v)+'P'+str(abs(v)) for v in [int(m) for m in amount]])
            self.__print(' '.join(msg))
            self.__send(''.join(com))
            self.__send('G:')
        else:
            # Single-stage move.
            com.append(str(stage))
            com.append(self.__direction(amount))
            com.append('P')
            msg.append('#'+str(stage)+', '+str(amount))
            if not in_pulse:
                msg.append('micro-meters/degrees.')
                if not self.divisions_loaded:
                    self.__load_divisions()
                # NOTE(review): 'Devision' is a typo in this debug string;
                # left untouched here since it is runtime output.
                self.__print('Amount: '+str(amount)+' Base rate: '+str(get_value_per_pulse(self.stages[stage-1])) + ' Devision:' + str(self.divisions[stage-1]), level=3)
            # Ternary guards self.divisions access when in_pulse is True
            # (divisions may not have been loaded in that case).
            amount *= self.divisions[stage-1] / get_value_per_pulse(self.stages[stage-1]) if not in_pulse else 1
            com.append(str(abs(int(amount))))
            self.__print(' '.join(msg))
            self.__send(''.join(com))
            self.__send('G:')
        if wait_for_finish:
            self.__wait_for_ready()
def jog(self, stage=1, directions=1):
'''Jog drive. Continue moving before arriving the limit point or system limit.
The drive speed is the slowest speed (S-speed).
To stop the drive, use stop() method.
Jog operation stops automatically when \pm 268,435,455 pulses are sent.
When the position of a stage becomes \pm 999,999,999, operation is aborted and become a overflow alert mode.
In this case, call stop() and set_origin() methods to get back to a normal mode.
Parameters
----------
stage : int
The target stage number. If directions are given by tuple or list, this value is ignored.
directions : int, tuple, or list
Jog drive directions. Only a sign of a number is used.
See also
--------
stop(), abort(), get_status(), set_origin()
'''
if self.controller.is_SHOT():
self.__jog_shot(stage, directions)
else:
raise NotImplemented() # TODO: HIT-mode.
def __jog_shot(self, stage, directions):
if not self.controller.is_support_J():
raise Not_Supported()
if not self.controller.is_support_G():
raise Not_Supported()
if isinstance(directions, (tuple, list)):
arg = [self.__direction(d) for d in directions]
com = 'J:W' + ''.join(arg)
self.__print('Jog drive, multi-stages.')
self.__send(com)
self.__send('G:')
else:
com = 'J:'+str(stage)+self.__direction(directions)
self.__print('Jog drive, single stage.')
self.__send(com)
self.__send('G:')
def stop(self, stage=1, all_stages=False, emergency=False):
''' Stop stages.
Parameters
----------
stage : int
Target stage number. If emergency or all_stages are used, this value is ignored.
all_stages : bool, optional
Stop all stages.
emergency : bool, optional
Force stop all stages immediately. All operations are aborted.
This may cause a big reaction when a stage is moving fast.
Also, this mode does not check the capability of the controller to this operation.
See also
--------
abort(), jog()
'''
if self.controller.is_SHOT():
self.__stop_shot(stage, all_stages, emergency)
else:
raise NotImplemented() # TODO: HIT-mode.
def __stop_shot(self, stage, all_stages, emergency):
if emergency:
self.__send('L:E')
self.__print('Emergency stop.')
return
if not self.controller.is_support_L():
raise Not_Supported()
if all_stages:
self.__print('Stop All stages.')
self.__send('L:W')
else:
self.__print('Stop stage #'+str(stage))
self.__send('L:'+str(stage))
self.__wait_for_ready()
def abort(self):
''' Equivalent to stop(emergency = True).
See also
--------
stop()
'''
self.stop(emergency=True)
def set_origin(self, stage=1, all_stages=False):
''' Set the electrical zero point of a stage at current position.
Parameters
----------
stage : int
Target stage number.
all_stages : bool, optional
If True, all stages' origin point are set.
See also
--------
reset(), initialize()
'''
if self.controller.is_SHOT():
self.__set_origin_shot(stage, all_stages)
else:
raise NotImplemented() # TODO: HIT-mode.
def __set_origin_shot(self, stage, all_stages):
if not self.controller.is_support_R():
raise Not_Supported()
if all_stages:
self.__print('Set origin for all stages.')
self.__send('R:W')
else:
self.__print('Set origin for stage #'+str(stage))
self.__send('R:'+str(stage))
def __check_SFR(self, s, f, r):
s_lim, f_lim, r_lim = self.controller.get_support_speed_ranges()
f = max(f_lim[0], min(f_lim[1], f))
s = max(s_lim[0], min(s_lim[1], s, f))
r = max(r_lim[0], min(r_lim[1], r))
if f >= 8000 and s < 64:
s = 64
return s, f, r
def set_speed(self, stage=1, S=1000, F=10000, R=100):
''' Set drive speed.
Parameters
----------
stage : int
Target stage number. If S, F, R are specified by tuple or list, this value is ignored.
S, F, R : int, tuple, or list
Speed parameters of each stage.
S: the slowest speed, F: the fastest speed, R: acceleration and deceleration time.
See also
--------
set_speed_reset_drive()
'''
if self.controller.is_SHOT():
self.__set_speed_shot(stage, S, F, R)
else:
raise NotImplemented() #TODO: HIT mode.
def __set_speed_shot(self, stage, S, F, R):
if not self.controller.is_support_D():
raise Not_Supported()
if isinstance(S, (tuple, list)):
self.__print('Set speed of multi-stages.')
SFRs = [self.__check_SFR(s, f, r) for s, f, r in zip(S, F, R)]
arg = ['S'+str(s)+'F'+str(f)+'R'+str(r) for s, f, r in SFRs]
com = 'D:W' + ''.join(arg)
self.__send(com)
else:
self.__print('Set speed of stage #'+str(stage))
s, f, r = self.__check_SFR(S, F, R)
com = 'D:'+str(stage)+'S'+str(s)+'F'+str(f)+'R'+str(r)
self.__send(com)
def set_speed_reset_drive(self, stage=1, S=1000, F=10000, R=100):
''' Set drive speed of going back to the origin.
Parameters
----------
stage : int
Target stage number. If S, F, R are specified by tuple or list, this value is ignored.
S, F, R : int, tuple, or list
Speed parameters of each stage.
S: the slowest speed, F: the fastest speed, R: acceleration and deceleration time.
See also
--------
set_speed()
'''
if self.controller.is_SHOT():
self.__set_speed_reset_drive_shot(stage, S, F, R)
else:
raise NotImplemented() # TODO: HIT-mode.
def __set_speed_reset_drive_shot(self, stage, S, F, R):
if not self.controller.is_support_V():
raise Not_Supported()
if isinstance(S, (tuple, list)):
self.__print('Set speed of multi-stages.')
SFRs = [self.__check_SFR(s, f, r) for s, f, r in zip(S, F, R)]
arg = ['S'+str(s)+'F'+str(f)+'R'+str(r) for s, f, r in SFRs]
com = 'V:W' + ''.join(arg)
self.__send(com)
else:
self.__print('Set speed of stage #'+str(stage))
s, f, r = self.__check_SFR(S, F, R)
com = 'V:'+str(stage)+'S'+str(s)+'F'+str(f)+'R'+str(r)
self.__send(com)
def set_excitation_mode(self, stage=1, mode=Excitation.Hold, all_stages=False):
'''Set excitation of the moters.
When releasing excitation, stages can be moved manually.
Parameters
----------
mode : Excitation
Excitation mode.
'''
if self.controller.is_SHOT():
self.__set_excitation_mode_shot(stage, mode, all_stages)
else:
raise NotImplemented() # TODO: HIT-mode.
def __set_excitation_mode_shot(self, stage, mode, all_stages):
if not self.controller.is_support_C():
raise Not_Supported()
if all_stages:
self.__print('Set all stages to '+mode.name)
self.__send('C:W'+str(int(mode)))
else:
self.__print('Set stage #'+str(stage)+' to '+mode.name)
self.__send('C:'+str(stage)+str(int(mode)))
def set_division(self, stage=1, division=2):
''' Set division of step angle of a stepping moter.
Parameters
----------
stage : int
Target stage number.
division : int
Divisions of a stepping moter.
When a stage is operated by closed-loop method, Higher value is recommended.
'''
if self.controller.is_SHOT():
self.__set_division_shot(stage, division)
else:
raise NotImplemented() # TODO: HIT-mode.
def __set_division_shot(self, stage, division):
if not self.controller.is_support_S():
raise Not_Supported()
if not division in self.controller.get_support_devisions():
raise ValueError('Unsupported division value.')
self.__print('Set division of #'+str(stage)+' to '+str(division))
self.__send('S:' + str(stage) + str(division))
self.__load_divisions()
def get_status(self):
''' Get status of stages and the controller.
Returns
-------
positions : list
List of positions of stages.
ack1 : str
'X' or 'K', which represent that the command is denined or accepted, respectively.
ack2 : str
'K' represents all stages are stable, and other strings represent one or more stages are stopped at limit sensor.
ack3 : str
'B' or 'R', which represent the controller is busy or ready, respectively.
See also
--------
get_position(), is_busy(), is_ready()
'''
if self.controller.is_SHOT():
return self.__get_status_shot()
else:
raise NotImplemented() # TODO: HIT mode.
def __get_status_shot(self):
if not self.controller.is_support_Q():
raise Not_Supported()
response = self.__send('Q:', no_response=False)
data = response.split(',')
ack1, ack2, ack3 = data[-3:]
positions = [int(d) for d in data[:len(data) - 3]]
return positions, ack1, ack2, ack3
def get_position(self, in_pulse=False):
''' Get positions of stages.
Parameters
----------
in_pulse : bool, optional
If True, returns a position as is (pulses), otherwise, returns in micro-meters or milli-degrees.
Returns
-------
positions : list
List of positions of stages.
See also
--------
get_status()
'''
positions = self.get_status()[0]
if in_pulse:
return positions
else:
if not self.divisions_loaded:
self.__load_divisions()
return [p * get_value_per_pulse(s) / d for p, s, d in zip(positions, self.stages, self.divisions)]
def __test_304GS_SGSP46():
    '''Manual test #1: SHOT-304GS controller driving an SGSP46-800 stage.

    Requires the physical controller to be connected; every statement is
    a hardware side effect, so the order must not change.
    '''
    stages = Session(Controllers.SHOT_304GS, verbose_level=3)
    stages.append_stage(Stages.SGSP46_800)
    stages.connect()
    stages.initialize()
    # Stage #1: S=1000, F=10000, R=500.
    stages.set_speed(1, 1000, 10000, 500)
    stages.move(amount=100000, wait_for_finish=True)
    stages.move(amount=200000, wait_for_finish=True, absolute=True)
    stages.set_origin()
    # Jog for ~10 seconds, then stop and read back the position.
    stages.jog()
    time.sleep(10)
    stages.stop()
    stages.get_position()
    stages.reset(wait_for_finish=True)
    stages.move(amount=-200000, wait_for_finish=True, absolute=True)
def __test_702_SGSP120Y():
    '''Manual test #2: SHOT-702 controller driving an SGSP-120YAW stage.

    Requires the physical controller to be connected; every statement is
    a hardware side effect, so the order must not change.
    '''
    stages = Session(Controllers.SHOT_702, verbose_level=3)
    stages.append_stage(Stages.SGSP_120YAW)
    stages.connect()
    stages.initialize()
    # Stage #1: S=1000, F=10000, R=500.
    stages.set_speed(1, 1000, 10000, 500)
    stages.move(amount=45000, wait_for_finish=True)
    stages.move(amount=90000, wait_for_finish=True, absolute=True)
    stages.set_origin()
    # Jog for ~10 seconds, then stop and read back the position.
    stages.jog()
    time.sleep(10)
    stages.stop()
    stages.get_position()
    stages.reset(wait_for_finish=True)
    stages.move(amount=-90000, wait_for_finish=True, absolute=True)
if __name__ == '__main__':
    # Manual smoke test; requires the physical controller hardware.
    __test_304GS_SGSP46()
    # __test_702_SGSP120Y()
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import logging
import os
import django
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from django.utils import timezone
from django.utils import unittest
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from horizon import exceptions
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.projects import workflows
from openstack_dashboard import policy_backend
from openstack_dashboard.test import helpers as test
from openstack_dashboard import usage
from openstack_dashboard.usage import quotas
with_sel = os.environ.get('WITH_SELENIUM', False)
if with_sel:
from selenium.webdriver import ActionChains # noqa
from selenium.webdriver.common import keys
from socket import timeout as socket_timeout # noqa
INDEX_URL = reverse('horizon:identity:projects:index')
# NOTE(review): the two prefixes below look swapped -- USER_ROLE_PREFIX is
# built from the *group* member slug and GROUP_ROLE_PREFIX from the *user*
# member slug. Confirm against the field names generated by the membership
# workflow steps before changing, since tests and workflow must agree.
USER_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
PROJECT_DETAIL_URL = reverse('horizon:identity:projects:detail', args=[1])
class TenantsViewTests(test.BaseAdminViewTests):
    '''Admin tests for the project (tenant) index view.

    mox stubs are recorded in call order and verified on replay, so the
    recording statements must not be reordered.
    '''
    @test.create_stubs({api.keystone: ('tenant_list',)})
    def test_index(self):
        # The admin index lists tenants across all domains, paginated.
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=None,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())
    @test.create_stubs({api.keystone: ('tenant_list', )})
    def test_index_with_domain_context(self):
        # With a domain context in the session, only that domain's tenants
        # are listed and the domain name is rendered in the page.
        domain = self.domains.get(id="1")
        self.setSessionValues(domain_context=domain.id,
                              domain_context_name=domain.name)
        domain_tenants = [tenant for tenant in self.tenants.list()
                          if tenant.domain_id == domain.id]
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 domain=domain.id,
                                 paginate=True,
                                 marker=None) \
            .AndReturn([domain_tenants, False])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, domain_tenants)
        self.assertContains(res, "<em>test_domain:</em>")
class ProjectsViewNonAdminTests(test.TestCase):
    '''Non-admin tests for the project index view.'''
    @override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
    @test.create_stubs({api.keystone: ('tenant_list',)})
    def test_index(self):
        # Non-admin users only see their own tenants (admin=False).
        api.keystone.tenant_list(IsA(http.HttpRequest),
                                 user=self.user.id,
                                 paginate=True,
                                 marker=None,
                                 admin=False) \
            .AndReturn([self.tenants.list(), False])
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'identity/projects/index.html')
        self.assertItemsEqual(res.context['table'].data, self.tenants.list())
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
domain = self._get_default_domain()
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled,
"domain": domain.id}
return project_info
def _get_workflow_fields(self, project):
domain = self._get_default_domain()
project_info = {"domain_id": domain.id,
"domain_name": domain.name,
"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
def _get_default_domain(self):
default_domain = self.domain
domain = {"id": self.request.session.get('domain_context',
default_domain.id),
"name": self.request.session.get('domain_context_name',
default_domain.name)}
return api.base.APIDictWrapper(domain)
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
api.base: ('is_service_enabled',),
api.neutron: ('is_extension_supported',),
quotas: ('get_default_quota_data',)})
def test_add_project_get(self):
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
.MultipleTimes().AndReturn(True)
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, '<input type="hidden" name="subnet" '
'id="id_subnet" />', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertQuerysetEqual(
workflow.steps,
['<CreateProjectInfo: createprojectinfoaction>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<CreateProjectQuota: create_quotas>'])
def test_add_project_get_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_get()
@test.create_stubs({api.keystone: ('get_default_role',
'user_list',
'group_list',
'role_list',
'domain_get'),
api.neutron: ('is_extension_supported',
'tenant_quota_get'),
quotas: ('get_default_quota_data',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_get_with_neutron(self):
quota = self.quotas.first()
neutron_quotas = self.neutron_quotas.first()
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(quota)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(neutron_quotas)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.users.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:identity:projects:create'))
self.assertTemplateUsed(res, views.WorkflowView.template_name)
if django.VERSION >= (1, 6):
self.assertContains(res, '''
<input class="form-control"
id="id_subnet" min="-1"
name="subnet" type="number" value="10" />
''', html=True)
else:
self.assertContains(res, '''
<input class="form-control"
name="subnet" id="id_subnet"
value="10" type="text" />
''', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['subnet'],
neutron_quotas.get('subnet').limit)
@test.create_stubs({api.keystone: ('get_default_role',
'add_tenant_user_role',
'tenant_create',
'user_list',
'group_list',
'role_list',
'domain_get'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages',),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_post(self, neutron=False):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
if neutron:
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_post()
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_post_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_add_project_post(neutron=True)
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas')})
def test_add_project_quota_defaults_error(self):
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndRaise(self.exceptions.nova)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
url = reverse('horizon:identity:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_defaults_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages')})
def test_add_project_tenant_create_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_tenant_create_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.nova: ('tenant_quota_update',)})
def test_add_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_update_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',
'get_disabled_quotas',
'tenant_quota_usages'),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_user_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
.AndReturn(self.disabled_quotas.first())
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id) \
.AndRaise(self.exceptions.keystone)
break
break
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:identity:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
    """Re-run the user-update-error scenario with a domain context set."""
    ctx = self.domains.get(id="1")
    dom_id, dom_name = ctx.id, ctx.name
    self.setSessionValues(domain_context=dom_id,
                          domain_context_name=dom_name)
    self.test_add_project_user_update_error()
@test.create_stubs({api.keystone: ('user_list',
                                   'role_list',
                                   'group_list',
                                   'get_default_domain',
                                   'get_default_role'),
                    quotas: ('get_default_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages')})
def test_add_project_missing_field_error(self):
    """Posting the create form without a name shows a validation error."""
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    default_domain = self._get_default_domain()
    domain_id = default_domain.id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()

    # init
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(default_domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)
    self.mox.ReplayAll()

    workflow_data = self._get_workflow_data(project, quota)
    # Blank out the required "name" field to trigger the error.
    workflow_data["name"] = ""
    url = reverse('horizon:identity:projects:create')
    res = self.client.post(url, workflow_data)

    self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
    """Re-run the missing-field scenario with a domain context set."""
    ctx = self.domains.get(id="1")
    dom_id, dom_name = ctx.id, ctx.name
    self.setSessionValues(domain_context=dom_id,
                          domain_context_name=dom_name)
    self.test_add_project_missing_field_error()
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_quota_info(self, quota):
    """Flatten nova, cinder and neutron quota limits into one dict.

    Each quota field maps to the integer limit taken from the matching
    quota fixture.
    """
    cinder_quota = self.cinder_quotas.first()
    neutron_quota = self.neutron_quotas.first()
    sources = ((quota, quotas.NOVA_QUOTA_FIELDS),
               (cinder_quota, quotas.CINDER_QUOTA_FIELDS),
               (neutron_quota, quotas.NEUTRON_QUOTA_FIELDS))
    return {field: int(source.get(field).limit)
            for source, fields in sources
            for field in fields}
def _get_all_users(self, domain_id):
    """Return all user fixtures, filtered to ``domain_id`` when given."""
    everyone = self.users.list()
    if not domain_id:
        return everyone
    return [member for member in everyone
            if member.domain_id == domain_id]
def _get_all_groups(self, domain_id):
    """Return all group fixtures, filtered to ``domain_id`` when given."""
    everyone = self.groups.list()
    if not domain_id:
        return everyone
    return [member for member in everyone
            if member.domain_id == domain_id]
def _get_proj_users(self, project_id):
    """Return the user fixtures that belong to ``project_id``."""
    matches = (candidate for candidate in self.users.list()
               if candidate.project_id == project_id)
    return list(matches)
def _get_proj_groups(self, project_id):
    """Return the group fixtures that belong to ``project_id``."""
    matches = (candidate for candidate in self.groups.list()
               if candidate.project_id == project_id)
    return list(matches)
def _get_proj_role_assignment(self, project_id):
    """Return the role-assignment fixtures scoped to ``project_id``."""
    scope = {'project': {'id': project_id}}
    return self.role_assignments.filter(scope=scope)
@test.create_stubs({api.keystone: ('get_default_role',
                                   'roles_for_user',
                                   'tenant_get',
                                   'domain_get',
                                   'user_list',
                                   'roles_for_group',
                                   'group_list',
                                   'role_list',
                                   'role_assignments_list'),
                    quotas: ('get_tenant_quota_data',
                             'get_disabled_quotas')})
def test_update_project_get(self):
    """GET of the update workflow pre-populates info and quota fields."""
    keystone_api_version = api.keystone.VERSIONS.active
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    domain_id = project.domain_id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    proj_users = self._get_proj_users(project.id)
    role_assignments = self._get_proj_role_assignment(project.id)

    api.keystone.tenant_get(IsA(http.HttpRequest),
                            self.tenant.id, admin=True) \
        .AndReturn(project)
    api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
        .AndReturn(self.domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    # Keystone v3 derives membership from role assignments; v2 walks
    # the project's users and queries each user's roles.
    if keystone_api_version >= 3:
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)
    else:
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id) \
            .AndReturn(proj_users)
        for user in proj_users:
            api.keystone.roles_for_user(IsA(http.HttpRequest),
                                        user.id,
                                        self.tenant.id).AndReturn(roles)
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)

    self.mox.ReplayAll()

    url = reverse('horizon:identity:projects:update',
                  args=[self.tenant.id])
    res = self.client.get(url)

    self.assertTemplateUsed(res, views.WorkflowView.template_name)
    workflow = res.context['workflow']
    self.assertEqual(res.context['workflow'].name,
                     workflows.UpdateProject.name)

    step = workflow.get_step("update_info")
    self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
    self.assertEqual(step.action.initial['injected_files'],
                     quota.get('injected_files').limit)
    self.assertEqual(step.action.initial['name'], project.name)
    self.assertEqual(step.action.initial['description'],
                     project.description)
    self.assertQuerysetEqual(
        workflow.steps,
        ['<UpdateProjectInfo: update_info>',
         '<UpdateProjectMembers: update_members>',
         '<UpdateProjectGroups: update_group_members>',
         '<UpdateProjectQuota: update_quotas>'])
@test.create_stubs({api.keystone: ('tenant_get',
                                   'domain_get',
                                   'tenant_update',
                                   'get_default_role',
                                   'roles_for_user',
                                   'remove_tenant_user_role',
                                   'add_tenant_user_role',
                                   'user_list',
                                   'roles_for_group',
                                   'remove_group_role',
                                   'add_group_role',
                                   'group_list',
                                   'role_list',
                                   'role_assignments_list'),
                    api.nova: ('tenant_quota_update',),
                    api.cinder: ('tenant_quota_update',),
                    quotas: ('get_tenant_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages')})
def test_update_project_save(self, neutron=False):
    """Full happy-path save: info, user/group roles and quotas updated.

    Expects exactly one warning message (attempting to strip all roles
    from the current admin user) and no errors.  ``neutron=True`` is
    used by the neutron variant below to record the extra
    get_disabled_quotas call.
    """
    keystone_api_version = api.keystone.VERSIONS.active
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    domain_id = project.domain_id
    users = self._get_all_users(domain_id)
    proj_users = self._get_proj_users(project.id)
    groups = self._get_all_groups(domain_id)
    proj_groups = self._get_proj_groups(project.id)
    roles = self.roles.list()
    role_assignments = self._get_proj_role_assignment(project.id)
    quota_usages = self.quota_usages.first()

    # get/init
    api.keystone.tenant_get(IsA(http.HttpRequest),
                            self.tenant.id, admin=True) \
        .AndReturn(project)
    api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
        .AndReturn(self.domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    if neutron:
        quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
            .AndReturn(self.disabled_quotas.first())
    quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    workflow_data = {}
    if keystone_api_version >= 3:
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)
    else:
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id) \
            .AndReturn(proj_users)
        for user in proj_users:
            api.keystone.roles_for_user(IsA(http.HttpRequest),
                                        user.id,
                                        self.tenant.id).AndReturn(roles)
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)

    workflow_data[USER_ROLE_PREFIX + "1"] = ['3']  # admin role
    workflow_data[USER_ROLE_PREFIX + "2"] = ['2']  # member role
    # Group assignment form data
    workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3']  # admin role
    workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2']  # member role

    # update some fields
    project._info["domain_id"] = domain_id
    project._info["name"] = "updated name"
    project._info["description"] = "updated description"
    quota.metadata_items = 444
    quota.volumes = 444

    updated_project = {"name": project._info["name"],
                       "description": project._info["description"],
                       "enabled": project.enabled}
    updated_quota = self._get_quota_info(quota)

    # handle
    api.keystone.tenant_update(IsA(http.HttpRequest),
                               project.id,
                               **updated_project) \
        .AndReturn(project)

    api.keystone.user_list(IsA(http.HttpRequest),
                           project=self.tenant.id).AndReturn(proj_users)

    # admin user - try to remove all roles on current project, warning
    api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                self.tenant.id) \
        .AndReturn(roles)

    # member user 1 - has role 1, will remove it
    api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                self.tenant.id) \
        .AndReturn((roles[0],))
    # remove role 1
    api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
                                         project=self.tenant.id,
                                         user='2',
                                         role='1')
    # add role 2
    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                      project=self.tenant.id,
                                      user='2',
                                      role='2')

    # member user 3 - has role 2
    api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                self.tenant.id) \
        .AndReturn((roles[1],))
    # remove role 2
    api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
                                         project=self.tenant.id,
                                         user='3',
                                         role='2')
    # add role 1
    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                      project=self.tenant.id,
                                      user='3',
                                      role='1')

    # Group assignments
    api.keystone.group_list(IsA(http.HttpRequest),
                            domain=domain_id,
                            project=self.tenant.id).AndReturn(proj_groups)

    # admin group - try to remove all roles on current project
    api.keystone.roles_for_group(IsA(http.HttpRequest),
                                 group='1',
                                 project=self.tenant.id) \
        .AndReturn(roles)
    for role in roles:
        api.keystone.remove_group_role(IsA(http.HttpRequest),
                                       role=role.id,
                                       group='1',
                                       project=self.tenant.id)

    # member group 1 - has role 1, will remove it
    api.keystone.roles_for_group(IsA(http.HttpRequest),
                                 group='2',
                                 project=self.tenant.id) \
        .AndReturn((roles[0],))
    # remove role 1
    api.keystone.remove_group_role(IsA(http.HttpRequest),
                                   role='1',
                                   group='2',
                                   project=self.tenant.id)
    # add role 2
    api.keystone.add_group_role(IsA(http.HttpRequest),
                                role='2',
                                group='2',
                                project=self.tenant.id)

    # member group 3 - has role 2
    api.keystone.roles_for_group(IsA(http.HttpRequest),
                                 group='3',
                                 project=self.tenant.id) \
        .AndReturn((roles[1],))
    # remove role 2
    api.keystone.remove_group_role(IsA(http.HttpRequest),
                                   role='2',
                                   group='3',
                                   project=self.tenant.id)
    # add role 1
    api.keystone.add_group_role(IsA(http.HttpRequest),
                                role='1',
                                group='3',
                                project=self.tenant.id)

    quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
        .AndReturn(quota_usages)

    nova_updated_quota = dict([(key, updated_quota[key]) for key in
                               quotas.NOVA_QUOTA_FIELDS])
    api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                 project.id,
                                 **nova_updated_quota)

    cinder_updated_quota = dict([(key, updated_quota[key]) for key in
                                 quotas.CINDER_QUOTA_FIELDS])
    api.cinder.tenant_quota_update(IsA(http.HttpRequest),
                                   project.id,
                                   **cinder_updated_quota)
    self.mox.ReplayAll()

    # submit form data
    project_data = {"domain_id": project._info["domain_id"],
                    "name": project._info["name"],
                    "id": project.id,
                    "description": project._info["description"],
                    "enabled": project.enabled}
    workflow_data.update(project_data)
    workflow_data.update(updated_quota)
    url = reverse('horizon:identity:projects:update',
                  args=[self.tenant.id])
    res = self.client.post(url, workflow_data)

    self.assertNoFormErrors(res)
    self.assertMessageCount(error=0, warning=1)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('is_extension_supported',
                                  'tenant_quota_get',
                                  'tenant_quota_update')})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_update_project_save_with_neutron(self):
    """Same as test_update_project_save but with neutron quotas enabled.

    Records the extra neutron quota calls, then delegates the rest of
    the expectations and the form POST to test_update_project_save.
    """
    quota_data = self.neutron_quotas.first()
    neutron_updated_quota = dict([(key, quota_data.get(key).limit)
                                  for key in quotas.NEUTRON_QUOTA_FIELDS])

    api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
        .MultipleTimes().AndReturn(True)
    api.neutron.tenant_quota_get(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(quota_data)
    api.neutron.tenant_quota_update(IsA(http.HttpRequest),
                                    self.tenant.id,
                                    **neutron_updated_quota)
    self.test_update_project_save(neutron=True)
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_project_get_error(self):
    """A failing tenant_get on workflow GET redirects back to the index."""
    api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                            admin=True) \
        .AndRaise(self.exceptions.nova)

    self.mox.ReplayAll()

    url = reverse('horizon:identity:projects:update',
                  args=[self.tenant.id])
    res = self.client.get(url)

    self.assertRedirectsNoFollow(res, INDEX_URL)
# NOTE(review): this stub list names 'remove_tenant_user' where the
# sibling tests stub 'remove_tenant_user_role'; it is never called here
# because tenant_update raises first -- confirm the name is intentional.
@test.create_stubs({api.keystone: ('tenant_get',
                                   'domain_get',
                                   'tenant_update',
                                   'get_default_role',
                                   'roles_for_user',
                                   'remove_tenant_user',
                                   'add_tenant_user_role',
                                   'user_list',
                                   'roles_for_group',
                                   'remove_group_role',
                                   'add_group_role',
                                   'group_list',
                                   'role_list',
                                   'role_assignments_list'),
                    quotas: ('get_tenant_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages',),
                    api.nova: ('tenant_quota_update',)})
def test_update_project_tenant_update_error(self):
    """tenant_update failing aborts the workflow with a redirect."""
    keystone_api_version = api.keystone.VERSIONS.active
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    domain_id = project.domain_id
    users = self._get_all_users(domain_id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    proj_users = self._get_proj_users(project.id)
    role_assignments = self.role_assignments.list()
    quota_usages = self.quota_usages.first()

    # get/init
    api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                            admin=True) \
        .AndReturn(project)
    api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
        .AndReturn(self.domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    workflow_data = {}
    if keystone_api_version >= 3:
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)
    else:
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id) \
            .AndReturn(proj_users)
        for user in proj_users:
            api.keystone.roles_for_user(IsA(http.HttpRequest),
                                        user.id,
                                        self.tenant.id).AndReturn(roles)
        # Build the user-role form data so the POST passes validation.
        role_ids = [role.id for role in roles]
        for user in proj_users:
            if role_ids:
                workflow_data.setdefault(USER_ROLE_PREFIX + role_ids[0], []) \
                    .append(user.id)
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)

    # Group-role form data is built regardless of keystone version.
    role_ids = [role.id for role in roles]
    for group in groups:
        if role_ids:
            workflow_data.setdefault(GROUP_ROLE_PREFIX + role_ids[0], []) \
                .append(group.id)

    # update some fields
    project._info["domain_id"] = domain_id
    project._info["name"] = "updated name"
    project._info["description"] = "updated description"
    quota.metadata_items = 444
    quota.volumes = 444

    updated_project = {"name": project._info["name"],
                       "description": project._info["description"],
                       "enabled": project.enabled}
    updated_quota = self._get_quota_info(quota)

    # handle
    quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
        .AndReturn(quota_usages)
    api.keystone.tenant_update(IsA(http.HttpRequest),
                               project.id,
                               **updated_project) \
        .AndRaise(self.exceptions.keystone)
    self.mox.ReplayAll()

    # submit form data
    project_data = {"domain_id": project._info["domain_id"],
                    "name": project._info["name"],
                    "id": project.id,
                    "description": project._info["description"],
                    "enabled": project.enabled}
    workflow_data.update(project_data)
    workflow_data.update(updated_quota)
    url = reverse('horizon:identity:projects:update',
                  args=[self.tenant.id])
    res = self.client.post(url, workflow_data)

    self.assertNoFormErrors(res)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
                                   'domain_get',
                                   'tenant_update',
                                   'get_default_role',
                                   'roles_for_user',
                                   'remove_tenant_user_role',
                                   'add_tenant_user_role',
                                   'user_list',
                                   'roles_for_group',
                                   'remove_group_role',
                                   'add_group_role',
                                   'group_list',
                                   'role_list',
                                   'role_assignments_list'),
                    quotas: ('get_tenant_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages',),
                    api.nova: ('tenant_quota_update',)})
def test_update_project_quota_update_error(self):
    """nova quota update failing yields two error messages."""
    keystone_api_version = api.keystone.VERSIONS.active
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    domain_id = project.domain_id
    users = self._get_all_users(domain_id)
    proj_users = self._get_proj_users(project.id)
    groups = self._get_all_groups(domain_id)
    proj_groups = self._get_proj_groups(project.id)
    roles = self.roles.list()
    role_assignments = self._get_proj_role_assignment(project.id)
    quota_usages = self.quota_usages.first()

    # get/init
    api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                            admin=True) \
        .AndReturn(project)
    api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
        .AndReturn(self.domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    workflow_data = {}
    if keystone_api_version >= 3:
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)
    else:
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id) \
            .AndReturn(proj_users)
        for user in proj_users:
            api.keystone.roles_for_user(IsA(http.HttpRequest),
                                        user.id,
                                        self.tenant.id).AndReturn(roles)
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)

    workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
    workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
    # Group role assignment data
    workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
    workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role

    # update some fields
    project._info["domain_id"] = domain_id
    project._info["name"] = "updated name"
    project._info["description"] = "updated description"
    quota[0].limit = 444
    quota[1].limit = -1

    updated_project = {"name": project._info["name"],
                       "description": project._info["description"],
                       "enabled": project.enabled}
    updated_quota = self._get_quota_info(quota)

    # handle
    api.keystone.tenant_update(IsA(http.HttpRequest),
                               project.id,
                               **updated_project) \
        .AndReturn(project)

    api.keystone.user_list(IsA(http.HttpRequest),
                           project=self.tenant.id).AndReturn(proj_users)

    # admin user - try to remove all roles on current project, warning
    api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                self.tenant.id) \
        .AndReturn(roles)

    # member user 1 - has role 1, will remove it
    api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                self.tenant.id) \
        .AndReturn((roles[1],))

    # member user 3 - has role 2
    api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                self.tenant.id) \
        .AndReturn((roles[0],))
    # add role 2
    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                      project=self.tenant.id,
                                      user='3',
                                      role='2')

    # Group assignment
    api.keystone.group_list(IsA(http.HttpRequest),
                            domain=domain_id,
                            project=self.tenant.id).AndReturn(proj_groups)

    # admin group 1- try to remove all roles on current project
    api.keystone.roles_for_group(IsA(http.HttpRequest),
                                 group='1',
                                 project=self.tenant.id) \
        .AndReturn(roles)

    # member group 1 - has no change
    api.keystone.roles_for_group(IsA(http.HttpRequest),
                                 group='2',
                                 project=self.tenant.id) \
        .AndReturn((roles[1],))

    # member group 3 - has role 1
    api.keystone.roles_for_group(IsA(http.HttpRequest),
                                 group='3',
                                 project=self.tenant.id) \
        .AndReturn((roles[0],))
    # add role 2
    api.keystone.add_group_role(IsA(http.HttpRequest),
                                role='2',
                                group='3',
                                project=self.tenant.id)

    quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
        .AndReturn(quota_usages)

    nova_updated_quota = dict([(key, updated_quota[key]) for key in
                               quotas.NOVA_QUOTA_FIELDS])
    api.nova.tenant_quota_update(IsA(http.HttpRequest),
                                 project.id,
                                 **nova_updated_quota) \
        .AndRaise(self.exceptions.nova)
    self.mox.ReplayAll()

    # submit form data
    project_data = {"domain_id": project._info["domain_id"],
                    "name": project._info["name"],
                    "id": project.id,
                    "description": project._info["description"],
                    "enabled": project.enabled}
    workflow_data.update(project_data)
    workflow_data.update(updated_quota)
    url = reverse('horizon:identity:projects:update',
                  args=[self.tenant.id])
    res = self.client.post(url, workflow_data)

    self.assertNoFormErrors(res)
    self.assertMessageCount(error=2, warning=0)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
                                   'domain_get',
                                   'tenant_update',
                                   'get_default_role',
                                   'roles_for_user',
                                   'remove_tenant_user_role',
                                   'add_tenant_user_role',
                                   'user_list',
                                   'roles_for_group',
                                   'remove_group_role',
                                   'add_group_role',
                                   'group_list',
                                   'role_list',
                                   'role_assignments_list'),
                    quotas: ('get_tenant_quota_data',
                             'get_disabled_quotas',
                             'tenant_quota_usages')})
def test_update_project_member_update_error(self):
    """add_tenant_user_role failing produces two error messages."""
    keystone_api_version = api.keystone.VERSIONS.active
    project = self.tenants.first()
    quota = self.quotas.first()
    default_role = self.roles.first()
    domain_id = project.domain_id
    users = self._get_all_users(domain_id)
    proj_users = self._get_proj_users(project.id)
    groups = self._get_all_groups(domain_id)
    roles = self.roles.list()
    role_assignments = self._get_proj_role_assignment(project.id)
    quota_usages = self.quota_usages.first()

    # get/init
    api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                            admin=True) \
        .AndReturn(project)
    api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
        .AndReturn(self.domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(quota)
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(default_role)
    api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(users)
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(roles)
    api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
        .AndReturn(groups)

    workflow_data = {}
    if keystone_api_version >= 3:
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)
    else:
        api.keystone.user_list(IsA(http.HttpRequest),
                               project=self.tenant.id) \
            .AndReturn(proj_users)
        for user in proj_users:
            api.keystone.roles_for_user(IsA(http.HttpRequest),
                                        user.id,
                                        self.tenant.id).AndReturn(roles)
        api.keystone.role_assignments_list(IsA(http.HttpRequest),
                                           project=self.tenant.id) \
            .AndReturn(role_assignments)

    workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
    workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role
    workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3']  # admin role
    workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3']  # member role

    # update some fields
    project._info["domain_id"] = domain_id
    project._info["name"] = "updated name"
    project._info["description"] = "updated description"
    quota.metadata_items = 444
    quota.volumes = 444

    updated_project = {"name": project._info["name"],
                       "description": project._info["description"],
                       "enabled": project.enabled}
    updated_quota = self._get_quota_info(quota)

    # handle
    quotas.tenant_quota_usages(IsA(http.HttpRequest), tenant_id=project.id) \
        .AndReturn(quota_usages)
    api.keystone.tenant_update(IsA(http.HttpRequest),
                               project.id,
                               **updated_project) \
        .AndReturn(project)

    api.keystone.user_list(IsA(http.HttpRequest),
                           project=self.tenant.id).AndReturn(proj_users)

    # admin user - try to remove all roles on current project, warning
    api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
                                self.tenant.id).AndReturn(roles)

    # member user 1 - has role 1, will remove it
    api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
                                self.tenant.id).AndReturn((roles[1],))

    # member user 3 - has role 2
    api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
                                self.tenant.id).AndReturn((roles[0],))
    # add role 2 -- this is the call that fails
    api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
                                      project=self.tenant.id,
                                      user='3',
                                      role='2')\
        .AndRaise(self.exceptions.keystone)
    self.mox.ReplayAll()

    # submit form data
    project_data = {"domain_id": project._info["domain_id"],
                    "name": project._info["name"],
                    "id": project.id,
                    "description": project._info["description"],
                    "enabled": project.enabled}
    workflow_data.update(project_data)
    workflow_data.update(updated_quota)
    url = reverse('horizon:identity:projects:update',
                  args=[self.tenant.id])
    res = self.client.post(url, workflow_data)

    self.assertNoFormErrors(res)
    self.assertMessageCount(error=2, warning=0)
    self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('get_default_role',
                                   'tenant_get',
                                   'domain_get'),
                    quotas: ('get_tenant_quota_data',
                             'get_disabled_quotas')})
def test_update_project_when_default_role_does_not_exist(self):
    """A missing default role makes the update workflow raise NotFound."""
    project = self.tenants.first()
    domain_id = project.domain_id
    quota = self.quotas.first()

    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(None)  # Default role doesn't exist
    api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
                            admin=True) \
        .AndReturn(project)
    api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
        .AndReturn(self.domain)
    quotas.get_disabled_quotas(IsA(http.HttpRequest)) \
        .AndReturn(self.disabled_quotas.first())
    quotas.get_tenant_quota_data(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id) \
        .AndReturn(quota)
    self.mox.ReplayAll()

    url = reverse('horizon:identity:projects:update',
                  args=[self.tenant.id])

    try:
        # Avoid the log message in the test output when the workflow's
        # step action cannot be instantiated
        logging.disable(logging.ERROR)
        with self.assertRaises(exceptions.NotFound):
            self.client.get(url)
    finally:
        logging.disable(logging.NOTSET)
class UsageViewTests(test.BaseAdminViewTests):
def _stub_nova_api_calls(self, nova_stu_enabled=True):
    """Stub the nova/cinder calls the usage view makes.

    Records one extension_supported expectation returning
    ``nova_stu_enabled``; callers record the remaining expectations.
    """
    self.mox.StubOutWithMock(api.nova, 'usage_get')
    self.mox.StubOutWithMock(api.nova, 'tenant_absolute_limits')
    self.mox.StubOutWithMock(api.nova, 'extension_supported')
    self.mox.StubOutWithMock(api.cinder, 'tenant_absolute_limits')

    api.nova.extension_supported(
        'SimpleTenantUsage', IsA(http.HttpRequest)) \
        .AndReturn(nova_stu_enabled)
def _stub_neutron_api_calls(self, neutron_sg_enabled=True):
    """Stub and record the neutron/network calls the usage view makes.

    When ``neutron_sg_enabled`` is False, the security-group listing is
    neither stubbed nor expected.
    """
    self.mox.StubOutWithMock(api.neutron, 'is_extension_supported')
    self.mox.StubOutWithMock(api.network, 'floating_ip_supported')
    self.mox.StubOutWithMock(api.network, 'tenant_floating_ip_list')
    if neutron_sg_enabled:
        self.mox.StubOutWithMock(api.network, 'security_group_list')
    api.neutron.is_extension_supported(
        IsA(http.HttpRequest),
        'security-group').AndReturn(neutron_sg_enabled)
    api.network.floating_ip_supported(IsA(http.HttpRequest)) \
        .AndReturn(True)
    api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
        .AndReturn(self.floating_ips.list())
    if neutron_sg_enabled:
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.q_secgroups.list())
def test_usage_csv(self):
    """CSV usage export with SimpleTenantUsage enabled."""
    self._test_usage_csv(True)
def test_usage_csv_disabled(self):
    """CSV usage export with SimpleTenantUsage disabled."""
    self._test_usage_csv(False)
def _test_usage_csv(self, nova_stu_enabled=True):
    """Fetch the project usage page as CSV and check the header row."""
    now = timezone.now()
    usage_obj = api.nova.NovaUsage(self.usages.first())
    self._stub_nova_api_calls(nova_stu_enabled)
    api.nova.extension_supported(
        'SimpleTenantUsage', IsA(http.HttpRequest)) \
        .AndReturn(nova_stu_enabled)
    # Usage window: start of the current month through end of today.
    start = datetime.datetime(now.year, now.month, 1, 0, 0, 0, 0)
    end = datetime.datetime(now.year, now.month, now.day, 23, 59, 59, 0)

    if nova_stu_enabled:
        api.nova.usage_get(IsA(http.HttpRequest),
                           self.tenant.id,
                           start, end).AndReturn(usage_obj)
    api.nova.tenant_absolute_limits(IsA(http.HttpRequest))\
        .AndReturn(self.limits['absolute'])
    api.cinder.tenant_absolute_limits(IsA(http.HttpRequest)) \
        .AndReturn(self.cinder_limits['absolute'])
    self._stub_neutron_api_calls()
    self.mox.ReplayAll()

    project_id = self.tenants.first().id
    csv_url = reverse('horizon:identity:projects:usage',
                      args=[project_id]) + "?format=csv"
    res = self.client.get(csv_url)
    self.assertTemplateUsed(res, 'project/overview/usage.csv')
    self.assertTrue(isinstance(res.context['usage'], usage.ProjectUsage))
    hdr = ('Instance Name,VCPUs,RAM (MB),Disk (GB),Usage (Hours),'
           'Uptime (Seconds),State')
    self.assertContains(res, '%s\r\n' % hdr)
class DetailProjectViewTests(test.BaseAdminViewTests):
    """Tests for the project detail view."""

    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_detail_view(self):
        """The detail page renders the project's name, id and title."""
        project = self.tenants.first()

        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndReturn(project)
        self.mox.ReplayAll()

        # NOTE(review): Client.get() has no 'args' parameter;
        # PROJECT_DETAIL_URL is presumably already reversed with an id,
        # so 'args' here is ignored -- confirm this is intentional.
        res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])

        self.assertTemplateUsed(res, 'identity/projects/detail.html')
        self.assertEqual(res.context['project'].name, project.name)
        self.assertEqual(res.context['project'].id, project.id)
        self.assertContains(res, "Project Details: %s" % project.name,
                            1, 200)

    @test.create_stubs({api.keystone: ('tenant_get',)})
    def test_detail_view_with_exception(self):
        """A failing tenant_get redirects the detail view to the index."""
        project = self.tenants.first()

        api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id) \
            .AndRaise(self.exceptions.keystone)
        self.mox.ReplayAll()

        res = self.client.get(PROJECT_DETAIL_URL, args=[project.id])

        self.assertRedirectsNoFollow(res, INDEX_URL)
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
"The WITH_SELENIUM env variable is not set.")
class SeleniumTests(test.SeleniumAdminTestCase):
@test.create_stubs(
    {api.keystone: ('tenant_list', 'tenant_get', 'tenant_update')})
def test_inline_editing_update(self):
    """Edit a tenant name in the table cell and verify the saved value.

    Drives the browser through hover -> edit -> type -> submit and
    waits for each AJAX round trip before asserting.
    """
    # Tenant List
    api.keystone.tenant_list(IgnoreArg(),
                             domain=None,
                             marker=None,
                             paginate=True) \
        .AndReturn([self.tenants.list(), False])
    # Edit mod
    api.keystone.tenant_get(IgnoreArg(),
                            u'1',
                            admin=True) \
        .AndReturn(self.tenants.list()[0])
    # Update - requires get and update
    api.keystone.tenant_get(IgnoreArg(),
                            u'1',
                            admin=True) \
        .AndReturn(self.tenants.list()[0])
    api.keystone.tenant_update(
        IgnoreArg(),
        u'1',
        description='a test tenant.',
        enabled=True,
        name=u'Changed test_tenant')
    # Refreshing cell with changed name
    changed_tenant = copy.copy(self.tenants.list()[0])
    changed_tenant.name = u'Changed test_tenant'
    api.keystone.tenant_get(IgnoreArg(),
                            u'1',
                            admin=True) \
        .AndReturn(changed_tenant)
    self.mox.ReplayAll()

    self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
    # Check the presence of the important elements
    td_element = self.selenium.find_element_by_xpath(
        "//td[@data-update-url='/identity/?action=cell_update"
        "&table=tenants&cell_name=name&obj_id=1']")
    cell_wrapper = td_element.find_element_by_class_name(
        'table_cell_wrapper')
    edit_button_wrapper = td_element.find_element_by_class_name(
        'table_cell_action')
    edit_button = edit_button_wrapper.find_element_by_tag_name('button')
    # Hovering over td and clicking on edit button
    action_chains = ActionChains(self.selenium)
    action_chains.move_to_element(cell_wrapper).click(edit_button)
    action_chains.perform()
    # Waiting for the AJAX response for switching to editing mod
    wait = self.ui.WebDriverWait(self.selenium, 10,
                                 ignored_exceptions=[socket_timeout])
    wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
    # Changing project name in cell form
    td_element = self.selenium.find_element_by_xpath(
        "//td[@data-update-url='/identity/?action=cell_update"
        "&table=tenants&cell_name=name&obj_id=1']")
    name_input = td_element.find_element_by_tag_name('input')
    name_input.send_keys(keys.Keys.HOME)
    name_input.send_keys("Changed ")
    # Saving new project name by AJAX
    td_element.find_element_by_class_name('inline-edit-submit').click()
    # Waiting for the AJAX response of cell refresh
    wait = self.ui.WebDriverWait(self.selenium, 10,
                                 ignored_exceptions=[socket_timeout])
    wait.until(lambda x: self.selenium.find_element_by_xpath(
        "//td[@data-update-url='/identity/?action=cell_update"
        "&table=tenants&cell_name=name&obj_id=1']"
        "/div[@class='table_cell_wrapper']"
        "/div[@class='table_cell_data_wrapper']"))
    # Checking new project name after cell refresh
    data_wrapper = self.selenium.find_element_by_xpath(
        "//td[@data-update-url='/identity/?action=cell_update"
        "&table=tenants&cell_name=name&obj_id=1']"
        "/div[@class='table_cell_wrapper']"
        "/div[@class='table_cell_data_wrapper']")
    self.assertTrue(data_wrapper.text == u'Changed test_tenant',
                    "Error: saved tenant name is expected to be "
                    "'Changed test_tenant'")
@test.create_stubs(
    {api.keystone: ('tenant_list', 'tenant_get')})
def test_inline_editing_cancel(self):
    """Cancelling an inline cell edit must leave the tenant name intact.

    Only the list/get keystone calls are recorded: cancel is handled in
    javascript on the client, so no extra request may reach the API.
    The test hovers the name cell, clicks the edit button, waits for the
    edit form via AJAX, clicks cancel and asserts the rendered name is
    unchanged.
    """
    # Tenant List
    api.keystone.tenant_list(IgnoreArg(),
                             domain=None,
                             marker=None,
                             paginate=True) \
        .AndReturn([self.tenants.list(), False])
    # Edit mod
    api.keystone.tenant_get(IgnoreArg(),
                            u'1',
                            admin=True) \
        .AndReturn(self.tenants.list()[0])
    # Cancel edit mod is without the request
    self.mox.ReplayAll()
    self.selenium.get("%s%s" % (self.live_server_url, INDEX_URL))
    # Check the presence of the important elements
    td_element = self.selenium.find_element_by_xpath(
        "//td[@data-update-url='/identity/?action=cell_update"
        "&table=tenants&cell_name=name&obj_id=1']")
    cell_wrapper = td_element.find_element_by_class_name(
        'table_cell_wrapper')
    edit_button_wrapper = td_element.find_element_by_class_name(
        'table_cell_action')
    edit_button = edit_button_wrapper.find_element_by_tag_name('button')
    # Hovering over td and clicking on edit
    action_chains = ActionChains(self.selenium)
    action_chains.move_to_element(cell_wrapper).click(edit_button)
    action_chains.perform()
    # Waiting for the AJAX response for switching to editing mod
    wait = self.ui.WebDriverWait(self.selenium, 10,
                                 ignored_exceptions=[socket_timeout])
    wait.until(lambda x: self.selenium.find_element_by_name("name__1"))
    # Click on cancel button
    # NOTE: the td is re-located because the edit form replaced the
    # previously fetched DOM subtree.
    td_element = self.selenium.find_element_by_xpath(
        "//td[@data-update-url='/identity/?action=cell_update"
        "&table=tenants&cell_name=name&obj_id=1']")
    td_element.find_element_by_class_name('inline-edit-cancel').click()
    # Cancel is via javascript, so it should be immediate
    # Checking that tenant name is not changed
    data_wrapper = self.selenium.find_element_by_xpath(
        "//td[@data-update-url='/identity/?action=cell_update"
        "&table=tenants&cell_name=name&obj_id=1']"
        "/div[@class='table_cell_wrapper']"
        "/div[@class='table_cell_data_wrapper']")
    self.assertTrue(data_wrapper.text == u'test_tenant',
                    "Error: saved tenant name is expected to be "
                    "'test_tenant'")
@test.create_stubs({api.keystone: ('get_default_domain',
                                   'get_default_role',
                                   'user_list',
                                   'group_list',
                                   'role_list'),
                    api.base: ('is_service_enabled',),
                    quotas: ('get_default_quota_data',)})
def test_membership_list_loads_correctly(self):
    """Every known user must show up in the project-membership widget.

    Stubs all keystone/quota lookups performed by the create-project
    workflow, loads the create form in the browser and checks that each
    user name is rendered inside the available-members list.
    """
    member_css_class = ".available_members"
    users = self.users.list()

    # Optional services are stubbed as disabled so the quota step makes
    # no network/volume specific calls.
    api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
        .MultipleTimes().AndReturn(False)
    api.base.is_service_enabled(IsA(http.HttpRequest), 'volume') \
        .MultipleTimes().AndReturn(False)
    api.keystone.get_default_domain(IsA(http.HttpRequest)) \
        .AndReturn(self.domain)
    quotas.get_default_quota_data(IsA(http.HttpRequest)) \
        .AndReturn(self.quotas.first())
    api.keystone.get_default_role(IsA(http.HttpRequest)) \
        .MultipleTimes().AndReturn(self.roles.first())
    api.keystone.user_list(IsA(http.HttpRequest), domain=self.domain.id) \
        .AndReturn(users)
    # role_list is recorded twice: once for the users step and once for
    # the groups step of the workflow.
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .AndReturn(self.roles.list())
    api.keystone.group_list(IsA(http.HttpRequest), domain=self.domain.id) \
        .AndReturn(self.groups.list())
    api.keystone.role_list(IsA(http.HttpRequest)) \
        .AndReturn(self.roles.list())
    self.mox.ReplayAll()

    self.selenium.get("%s%s" %
                      (self.live_server_url,
                       reverse('horizon:identity:projects:create')))

    members = self.selenium.find_element_by_css_selector(member_css_class)
    for user in users:
        self.assertIn(user.name, members.text)
| |
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import eventlet
from eventlet import greenthread
import mock
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt import event
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import host
# Point the host module at the in-tree fake libvirt implementation so
# these tests never open a real hypervisor connection.
host.libvirt = fakelibvirt
class FakeVirtDomain(object):
    """Minimal stand-in for a libvirt virDomain object.

    Exposes only the identity accessors (``name``, ``ID`` and
    ``UUIDString``) that the domain-listing tests rely on.
    """

    def __init__(self, id=-1, name=None):
        # Each instance gets a fresh random UUID, mirroring how libvirt
        # identifies real domains.
        self._id, self._name = id, name
        self._uuid = str(uuid.uuid4())

    def name(self):
        """Return the domain name supplied at construction (may be None)."""
        return self._name

    def ID(self):
        """Return the numeric domain id (default -1)."""
        return self._id

    def UUIDString(self):
        """Return the domain UUID in canonical string form."""
        return self._uuid
class HostTestCase(test.NoDBTestCase):
    """Tests for nova.virt.libvirt.host.Host against the fake libvirt."""

    def setUp(self):
        super(HostTestCase, self).setUp()

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.host = host.Host("qemu:///system")

    @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
    def test_close_callback(self, mock_close):
        self.close_callback = None

        def set_close_callback(cb, opaque):
            self.close_callback = cb

        mock_close.side_effect = set_close_callback
        # verify that the driver registers for the close callback
        self.host.get_connection()
        self.assertTrue(self.close_callback)

    @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
    def test_close_callback_bad_signature(self, mock_close):
        '''Validates that a connection to libvirt exist,
        even when registerCloseCallback method has a different
        number of arguments in the libvirt python library.
        '''
        mock_close.side_effect = TypeError('dd')
        connection = self.host.get_connection()
        self.assertTrue(connection)

    @mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
    def test_close_callback_not_defined(self, mock_close):
        '''Validates that a connection to libvirt exist,
        even when registerCloseCallback method missing from
        the libvirt python library.
        '''
        mock_close.side_effect = AttributeError('dd')
        connection = self.host.get_connection()
        self.assertTrue(connection)

    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    def test_broken_connection(self, mock_ver):
        # Each (error code, error domain) pair below must be recognised
        # as a broken connection by _test_connection.
        for (error, domain) in (
                (fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                 fakelibvirt.VIR_FROM_REMOTE),
                (fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                 fakelibvirt.VIR_FROM_RPC),
                (fakelibvirt.VIR_ERR_INTERNAL_ERROR,
                 fakelibvirt.VIR_FROM_RPC)):
            conn = self.host._connect("qemu:///system", False)
            mock_ver.side_effect = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                "Connection broken",
                error_code=error,
                error_domain=domain)
            self.assertFalse(self.host._test_connection(conn))

    @mock.patch.object(host, 'LOG')
    def test_connect_auth_cb_exception(self, log_mock):
        creds = dict(authname='nova', password='verybadpass')
        self.assertRaises(exception.NovaException,
                          self.host._connect_auth_cb, creds, False)
        self.assertEqual(0, len(log_mock.method_calls),
                         'LOG should not be used in _connect_auth_cb.')

    def test_event_dispatch(self):
        # Validate that the libvirt self-pipe for forwarding
        # events between threads is working sanely
        def handler(event):
            got_events.append(event)

        hostimpl = host.Host("qemu:///system",
                             lifecycle_event_handler=handler)
        got_events = []

        hostimpl._init_events_pipe()

        event1 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_STARTED)
        event2 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_PAUSED)

        hostimpl._queue_event(event1)
        hostimpl._queue_event(event2)
        hostimpl._dispatch_events()

        want_events = [event1, event2]
        self.assertEqual(want_events, got_events)

        event3 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_RESUMED)
        event4 = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_STOPPED)

        hostimpl._queue_event(event3)
        hostimpl._queue_event(event4)
        hostimpl._dispatch_events()

        # events must be delivered in queueing order
        want_events = [event1, event2, event3, event4]
        self.assertEqual(want_events, got_events)

    def test_event_lifecycle(self):
        got_events = []

        # Validate that libvirt events are correctly translated
        # to Nova events
        def handler(event):
            got_events.append(event)

        hostimpl = host.Host("qemu:///system",
                             lifecycle_event_handler=handler)
        conn = hostimpl.get_connection()

        hostimpl._init_events_pipe()
        fake_dom_xml = """
            <domain type='kvm'>
              <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
              <devices>
                <disk type='file'>
                  <source file='filename'/>
                </disk>
              </devices>
            </domain>
        """
        dom = fakelibvirt.Domain(conn,
                                 fake_dom_xml,
                                 False)

        hostimpl._event_lifecycle_callback(
            conn, dom, fakelibvirt.VIR_DOMAIN_EVENT_STOPPED, 0, hostimpl)
        hostimpl._dispatch_events()
        self.assertEqual(len(got_events), 1)
        self.assertIsInstance(got_events[0], event.LifecycleEvent)
        self.assertEqual(got_events[0].uuid,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
        self.assertEqual(got_events[0].transition,
                         event.EVENT_LIFECYCLE_STOPPED)

    def test_event_emit_delayed_call_now(self):
        got_events = []

        def handler(event):
            got_events.append(event)

        hostimpl = host.Host("qemu:///system",
                             lifecycle_event_handler=handler)
        ev = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_STOPPED)
        hostimpl._event_emit_delayed(ev)
        # for a qemu URI the event is emitted synchronously
        self.assertEqual(1, len(got_events))
        self.assertEqual(ev, got_events[0])

    @mock.patch.object(greenthread, 'spawn_after')
    def test_event_emit_delayed_call_delayed(self, spawn_after_mock):
        hostimpl = host.Host("xen:///",
                             lifecycle_event_handler=lambda e: None)
        ev = event.LifecycleEvent(
            "cef19ce0-0ca2-11df-855d-b19fbce37686",
            event.EVENT_LIFECYCLE_STOPPED)
        hostimpl._event_emit_delayed(ev)
        # for a xen URI the STOPPED emission is deferred by 15 seconds
        spawn_after_mock.assert_called_once_with(15, hostimpl._event_emit, ev)

    @mock.patch.object(greenthread, 'spawn_after')
    def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock):
        hostimpl = host.Host("xen:///",
                             lifecycle_event_handler=lambda e: None)
        uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        gt_mock = mock.Mock()
        hostimpl._events_delayed[uuid] = gt_mock
        ev = event.LifecycleEvent(
            uuid, event.EVENT_LIFECYCLE_STOPPED)
        hostimpl._event_emit_delayed(ev)
        # a pending delayed emission for the same domain is cancelled
        # before a new one is scheduled
        gt_mock.cancel.assert_called_once_with()
        self.assertTrue(spawn_after_mock.called)

    def test_event_delayed_cleanup(self):
        hostimpl = host.Host("xen:///",
                             lifecycle_event_handler=lambda e: None)
        uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        ev = event.LifecycleEvent(
            uuid, event.EVENT_LIFECYCLE_STARTED)
        gt_mock = mock.Mock()
        hostimpl._events_delayed[uuid] = gt_mock
        hostimpl._event_emit_delayed(ev)
        # a STARTED event cancels and removes any delayed emission
        gt_mock.cancel.assert_called_once_with()
        self.assertNotIn(uuid, hostimpl._events_delayed.keys())

    @mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny")
    @mock.patch.object(host.Host, "_connect")
    def test_get_connection_serial(self, mock_conn, mock_event):
        def get_conn_currency(host):
            host.get_connection().getLibVersion()

        def connect_with_block(*a, **k):
            # enough to allow another connect to run
            eventlet.sleep(0)
            self.connect_calls += 1
            return fakelibvirt.openAuth("qemu:///system",
                                        [[], lambda: 1, None], 0)

        def fake_register(*a, **k):
            self.register_calls += 1

        self.connect_calls = 0
        self.register_calls = 0
        mock_conn.side_effect = connect_with_block
        mock_event.side_effect = fake_register

        # call serially
        get_conn_currency(self.host)
        get_conn_currency(self.host)

        # the connection is cached: connect/register happen only once
        self.assertEqual(self.connect_calls, 1)
        self.assertEqual(self.register_calls, 1)

    @mock.patch.object(fakelibvirt.virConnect, "domainEventRegisterAny")
    @mock.patch.object(host.Host, "_connect")
    def test_get_connection_concurrency(self, mock_conn, mock_event):
        def get_conn_currency(host):
            host.get_connection().getLibVersion()

        def connect_with_block(*a, **k):
            # enough to allow another connect to run
            eventlet.sleep(0)
            self.connect_calls += 1
            return fakelibvirt.openAuth("qemu:///system",
                                        [[], lambda: 1, None], 0)

        def fake_register(*a, **k):
            self.register_calls += 1

        self.connect_calls = 0
        self.register_calls = 0
        mock_conn.side_effect = connect_with_block
        mock_event.side_effect = fake_register

        # call concurrently
        thr1 = eventlet.spawn(get_conn_currency, self.host)
        thr2 = eventlet.spawn(get_conn_currency, self.host)

        # let threads run
        eventlet.sleep(0)

        thr1.wait()
        thr2.wait()

        # even with concurrent callers only one connection is opened
        self.assertEqual(self.connect_calls, 1)
        self.assertEqual(self.register_calls, 1)

    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    @mock.patch.object(fakelibvirt.virConnect, "getVersion")
    @mock.patch.object(fakelibvirt.virConnect, "getType")
    def test_has_min_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver):
        fake_lv_ver.return_value = 1002003
        fake_hv_ver.return_value = 4005006
        fake_hv_type.return_value = 'xyz'

        lv_ver = (1, 2, 3)
        hv_ver = (4, 5, 6)
        hv_type = 'xyz'

        self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, hv_type))

        self.assertFalse(self.host.has_min_version(lv_ver, hv_ver, 'abc'))
        self.assertFalse(self.host.has_min_version(lv_ver, (4, 5, 7), hv_type))
        self.assertFalse(self.host.has_min_version((1, 3, 3), hv_ver, hv_type))

        # None acts as a wildcard for the corresponding component
        self.assertTrue(self.host.has_min_version(lv_ver, hv_ver, None))
        self.assertTrue(self.host.has_min_version(lv_ver, None, hv_type))
        self.assertTrue(self.host.has_min_version(None, hv_ver, hv_type))

    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    @mock.patch.object(fakelibvirt.virConnect, "getVersion")
    @mock.patch.object(fakelibvirt.virConnect, "getType")
    def test_has_version(self, fake_hv_type, fake_hv_ver, fake_lv_ver):
        fake_lv_ver.return_value = 1002003
        fake_hv_ver.return_value = 4005006
        fake_hv_type.return_value = 'xyz'

        lv_ver = (1, 2, 3)
        hv_ver = (4, 5, 6)
        hv_type = 'xyz'

        self.assertTrue(self.host.has_version(lv_ver, hv_ver, hv_type))

        for lv_ver_ in [(1, 2, 2), (1, 2, 4)]:
            self.assertFalse(self.host.has_version(lv_ver_, hv_ver, hv_type))
        for hv_ver_ in [(4, 4, 6), (4, 6, 6)]:
            self.assertFalse(self.host.has_version(lv_ver, hv_ver_, hv_type))
        self.assertFalse(self.host.has_version(lv_ver, hv_ver, 'abc'))

        # NOTE(review): these three previously called has_min_version
        # (copy-paste from test_has_min_version above, which already
        # covers them), leaving has_version's None-wildcard handling
        # untested. They now exercise has_version itself.
        self.assertTrue(self.host.has_version(lv_ver, hv_ver, None))
        self.assertTrue(self.host.has_version(lv_ver, None, hv_type))
        self.assertTrue(self.host.has_version(None, hv_ver, hv_type))

    @mock.patch.object(fakelibvirt.virConnect, "lookupByID")
    def test_get_domain_by_id(self, fake_lookup):
        dom = fakelibvirt.virDomain(self.host.get_connection(),
                                    "<domain id='7'/>")
        fake_lookup.return_value = dom

        self.assertEqual(dom, self.host._get_domain_by_id(7))

        fake_lookup.assert_called_once_with(7)

    @mock.patch.object(fakelibvirt.virConnect, "lookupByID")
    def test_get_domain_by_id_raises(self, fake_lookup):
        fake_lookup.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'Domain not found: no domain with matching id 7',
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN,
            error_domain=fakelibvirt.VIR_FROM_QEMU)

        # VIR_ERR_NO_DOMAIN must be translated to InstanceNotFound
        self.assertRaises(exception.InstanceNotFound,
                          self.host._get_domain_by_id,
                          7)

        fake_lookup.assert_called_once_with(7)

    @mock.patch.object(fakelibvirt.virConnect, "lookupByName")
    def test_get_domain_by_name(self, fake_lookup):
        dom = fakelibvirt.virDomain(self.host.get_connection(),
                                    "<domain id='7'/>")
        fake_lookup.return_value = dom

        self.assertEqual(dom, self.host._get_domain_by_name("wibble"))

        fake_lookup.assert_called_once_with("wibble")

    @mock.patch.object(fakelibvirt.virConnect, "lookupByName")
    def test_get_domain_by_name_raises(self, fake_lookup):
        fake_lookup.side_effect = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'Domain not found: no domain with matching name',
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN,
            error_domain=fakelibvirt.VIR_FROM_QEMU)

        self.assertRaises(exception.InstanceNotFound,
                          self.host._get_domain_by_name,
                          "wibble")

        fake_lookup.assert_called_once_with("wibble")

    @mock.patch.object(host.Host, "_get_domain_by_name")
    def test_get_domain(self, fake_get_domain):
        dom = fakelibvirt.virDomain(self.host.get_connection(),
                                    "<domain id='7'/>")
        fake_get_domain.return_value = dom
        instance = objects.Instance(id="124")

        self.assertEqual(dom, self.host.get_domain(instance))

        # instance id 124 == 0x7c, hence the hex-suffixed libvirt name
        fake_get_domain.assert_called_once_with("instance-0000007c")

    @mock.patch.object(fakelibvirt.Connection, "listAllDomains")
    def test_list_instance_domains_fast(self, mock_list_all):
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        def fake_list_all(flags):
            vms = []
            if flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE:
                vms.extend([vm1, vm2])
            if flags & fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE:
                vms.extend([vm3, vm4])
            return vms

        mock_list_all.side_effect = fake_list_all

        doms = self.host._list_instance_domains_fast()

        mock_list_all.assert_called_once_with(
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
        mock_list_all.reset_mock()

        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())

        doms = self.host._list_instance_domains_fast(only_running=False)

        mock_list_all.assert_called_once_with(
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE |
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE)

        self.assertEqual(len(doms), 4)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        self.assertEqual(doms[2].name(), vm3.name())
        self.assertEqual(doms[3].name(), vm4.name())

    @mock.patch.object(fakelibvirt.Connection, "numOfDomains")
    @mock.patch.object(fakelibvirt.Connection, "listDefinedDomains")
    @mock.patch.object(fakelibvirt.Connection, "listDomainsID")
    @mock.patch.object(host.Host, "_get_domain_by_name")
    @mock.patch.object(host.Host, "_get_domain_by_id")
    def test_list_instance_domains_slow(self,
                                        mock_get_id, mock_get_name,
                                        mock_list_ids, mock_list_names,
                                        mock_num_ids):
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")
        vms = [vm1, vm2, vm3, vm4]

        def fake_get_domain_by_id(id):
            for vm in vms:
                if vm.ID() == id:
                    return vm
            raise exception.InstanceNotFound(instance_id=id)

        def fake_get_domain_by_name(name):
            for vm in vms:
                if vm.name() == name:
                    return vm
            raise exception.InstanceNotFound(instance_id=name)

        def fake_list_ids():
            # Include one ID that no longer exists
            return [vm1.ID(), vm2.ID(), 666]

        def fake_list_names():
            # Include one name that no longer exists and
            # one dup from running list to show race in
            # transition from inactive -> running
            return [vm1.name(), vm3.name(), vm4.name(), "fishfood"]

        mock_get_id.side_effect = fake_get_domain_by_id
        mock_get_name.side_effect = fake_get_domain_by_name
        mock_list_ids.side_effect = fake_list_ids
        mock_list_names.side_effect = fake_list_names
        mock_num_ids.return_value = 2

        doms = self.host._list_instance_domains_slow()

        mock_list_ids.assert_called_once_with()
        mock_num_ids.assert_called_once_with()
        self.assertFalse(mock_list_names.called)
        mock_list_ids.reset_mock()
        mock_list_names.reset_mock()
        mock_num_ids.reset_mock()

        # the stale ID (666) must have been silently dropped
        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())

        doms = self.host._list_instance_domains_slow(only_running=False)

        mock_list_ids.assert_called_once_with()
        mock_num_ids.assert_called_once_with()
        mock_list_names.assert_called_once_with()

        # stale name and the duplicate of vm1 must both be filtered
        self.assertEqual(len(doms), 4)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        self.assertEqual(doms[2].name(), vm3.name())
        self.assertEqual(doms[3].name(), vm4.name())

    @mock.patch.object(fakelibvirt.Connection, "listAllDomains")
    @mock.patch.object(fakelibvirt.Connection, "numOfDomains")
    @mock.patch.object(fakelibvirt.Connection, "listDomainsID")
    @mock.patch.object(host.Host, "_get_domain_by_id")
    def test_list_instance_domains_fallback(self,
                                            mock_get_id, mock_list_ids,
                                            mock_num_ids, mock_list_all):
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vms = [vm1, vm2]

        def fake_get_domain_by_id(id):
            for vm in vms:
                if vm.ID() == id:
                    return vm
            raise exception.InstanceNotFound(instance_id=id)

        def fake_list_doms():
            return [vm1.ID(), vm2.ID()]

        def fake_list_all(flags):
            ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                "API is not supported",
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
            raise ex

        mock_get_id.side_effect = fake_get_domain_by_id
        mock_list_ids.side_effect = fake_list_doms
        mock_num_ids.return_value = 2
        mock_list_all.side_effect = fake_list_all

        doms = self.host.list_instance_domains()

        # listAllDomains raised NO_SUPPORT, so the slow path is taken
        mock_list_all.assert_called_once_with(
            fakelibvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
        mock_list_ids.assert_called_once_with()
        mock_num_ids.assert_called_once_with()

        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].ID(), vm1.ID())
        self.assertEqual(doms[1].ID(), vm2.ID())

    @mock.patch.object(host.Host, "_list_instance_domains_fast")
    def test_list_instance_domains_filtering(self, mock_list):
        vm0 = FakeVirtDomain(id=0, name="Domain-0")  # Xen dom-0
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")

        mock_list.return_value = [vm0, vm1, vm2]
        doms = self.host.list_instance_domains()

        # dom-0 is filtered out by default
        self.assertEqual(len(doms), 2)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        mock_list.assert_called_with(True)

        mock_list.return_value = [vm0, vm1, vm2, vm3, vm4]
        doms = self.host.list_instance_domains(only_running=False)

        self.assertEqual(len(doms), 4)
        self.assertEqual(doms[0].name(), vm1.name())
        self.assertEqual(doms[1].name(), vm2.name())
        self.assertEqual(doms[2].name(), vm3.name())
        self.assertEqual(doms[3].name(), vm4.name())
        mock_list.assert_called_with(False)

        mock_list.return_value = [vm0, vm1, vm2]
        doms = self.host.list_instance_domains(only_guests=False)

        # only_guests=False keeps dom-0 in the result
        self.assertEqual(len(doms), 3)
        self.assertEqual(doms[0].name(), vm0.name())
        self.assertEqual(doms[1].name(), vm1.name())
        self.assertEqual(doms[2].name(), vm2.name())
        mock_list.assert_called_with(True)

    def test_cpu_features_bug_1217630(self):
        self.host.get_connection()

        # Test old version of libvirt, it shouldn't see the `aes' feature
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
            caps = self.host.get_capabilities()
            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])

        # Cleanup the capabilities cache firstly
        self.host._caps = None

        # Test new version of libvirt, should find the `aes' feature
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
            caps = self.host.get_capabilities()
            self.assertIn('aes', [x.name for x in caps.host.cpu.features])

    def test_cpu_features_are_not_duplicated(self):
        self.host.get_connection()

        # Test old version of libvirt. Should return single 'hypervisor'
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
            caps = self.host.get_capabilities()
            cnt = [x.name for x in caps.host.cpu.features].count('xtpr')
            self.assertEqual(1, cnt)

        # Cleanup the capabilities cache firstly
        self.host._caps = None

        # Test new version of libvirt. Should still return single 'hypervisor'
        with mock.patch('nova.virt.libvirt.host.libvirt') as mock_libvirt:
            mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
            caps = self.host.get_capabilities()
            cnt = [x.name for x in caps.host.cpu.features].count('xtpr')
            self.assertEqual(1, cnt)

    def test_baseline_cpu_not_supported(self):
        # Handle just the NO_SUPPORT error
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virConnectBaselineCPU',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)

        with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
                               side_effect=not_supported_exc):
            caps = self.host.get_capabilities()
            self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])

        # Clear cached result so we can test again...
        self.host._caps = None

        # Other errors should not be caught
        other_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'other exc',
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)

        with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
                               side_effect=other_exc):
            self.assertRaises(fakelibvirt.libvirtError,
                              self.host.get_capabilities)

    def test_lxc_get_host_capabilities_failed(self):
        with mock.patch.object(fakelibvirt.virConnect, 'baselineCPU',
                               return_value=-1):
            caps = self.host.get_capabilities()
            self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
            self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])

    @mock.patch.object(fakelibvirt.virConnect, "getHostname")
    def test_get_hostname_caching(self, mock_hostname):
        mock_hostname.return_value = "foo"
        self.assertEqual('foo', self.host.get_hostname())
        mock_hostname.assert_called_with()

        mock_hostname.reset_mock()

        # the first value is cached: a changed hostname is not picked up
        mock_hostname.return_value = "bar"
        self.assertEqual('foo', self.host.get_hostname())
        mock_hostname.assert_called_with()

    @mock.patch.object(fakelibvirt.virConnect, "getType")
    def test_get_driver_type(self, mock_type):
        mock_type.return_value = "qemu"
        self.assertEqual("qemu", self.host.get_driver_type())
        mock_type.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virConnect, "getVersion")
    def test_get_version(self, mock_version):
        mock_version.return_value = 1005001
        self.assertEqual(1005001, self.host.get_version())
        mock_version.assert_called_once_with()

    @mock.patch.object(fakelibvirt.virConnect, "secretLookupByUsage")
    def test_find_secret(self, mock_sec):
        """finding secrets with various usage_type."""
        expected = [
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'rbdvol'),
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_CEPH, 'cephvol'),
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_ISCSI, 'iscsivol'),
            mock.call(fakelibvirt.VIR_SECRET_USAGE_TYPE_VOLUME, 'vol')]

        self.host.find_secret('rbd', 'rbdvol')
        self.host.find_secret('ceph', 'cephvol')
        self.host.find_secret('iscsi', 'iscsivol')
        self.host.find_secret('volume', 'vol')
        self.assertEqual(expected, mock_sec.mock_calls)

        # an unknown usage type is rejected
        self.assertRaises(exception.NovaException,
                          self.host.find_secret, "foo", "foovol")

        # error code 66 is VIR_ERR_NO_SECRET: a lookup miss returns None
        mock_sec.side_effect = fakelibvirt.libvirtError("")
        mock_sec.side_effect.err = (66, )
        self.assertIsNone(self.host.find_secret('rbd', 'rbdvol'))

    @mock.patch.object(fakelibvirt.virConnect, "secretDefineXML")
    def test_create_secret(self, mock_sec):
        """creating secrets with various usage_type."""
        self.host.create_secret('rbd', 'rbdvol')
        self.host.create_secret('ceph', 'cephvol')
        self.host.create_secret('iscsi', 'iscsivol')
        self.host.create_secret('volume', 'vol')
        self.assertRaises(exception.NovaException,
                          self.host.create_secret, "foo", "foovol")

        # when a password is supplied it must be stored on the secret
        secret = mock.MagicMock()
        mock_sec.return_value = secret
        self.host.create_secret('iscsi', 'iscsivol', password="foo")
        secret.setValue.assert_called_once_with("foo")

    @mock.patch('nova.virt.libvirt.host.Host.find_secret')
    def test_delete_secret(self, mock_find_secret):
        """deleting secret."""
        secret = mock.MagicMock()
        mock_find_secret.return_value = secret
        expected = [mock.call('rbd', 'rbdvol'),
                    mock.call().undefine()]

        self.host.delete_secret('rbd', 'rbdvol')
        self.assertEqual(expected, mock_find_secret.mock_calls)

        # deleting a secret that does not exist is a silent no-op
        mock_find_secret.return_value = None
        self.host.delete_secret("rbd", "rbdvol")
class DomainJobInfoTestCase(test.NoDBTestCase):
def setUp(self):
super(DomainJobInfoTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.conn = fakelibvirt.openAuth("qemu:///system",
[[], lambda: True])
xml = ("<domain type='kvm'>"
" <name>instance-0000000a</name>"
"</domain>")
self.dom = self.conn.createXML(xml, 0)
host.DomainJobInfo._have_job_stats = True
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats(self, mock_stats, mock_info):
mock_stats.return_value = {
"type": fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
"memory_total": 75,
"memory_processed": 50,
"memory_remaining": 33,
"some_new_libvirt_stat_we_dont_know_about": 83
}
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(0, info.disk_total)
self.assertEqual(0, info.disk_processed)
self.assertEqual(0, info.disk_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_no_support(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.return_value = [
fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(100, info.time_elapsed)
self.assertEqual(99, info.time_remaining)
self.assertEqual(10, info.data_total)
self.assertEqual(11, info.data_processed)
self.assertEqual(12, info.data_remaining)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(1, info.disk_total)
self.assertEqual(2, info.disk_processed)
self.assertEqual(3, info.disk_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_attr_error(self, mock_stats, mock_info):
mock_stats.side_effect = AttributeError("No such API")
mock_info.return_value = [
fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED,
100, 99, 10, 11, 12, 75, 50, 33, 1, 2, 3]
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, info.type)
self.assertEqual(100, info.time_elapsed)
self.assertEqual(99, info.time_remaining)
self.assertEqual(10, info.data_total)
self.assertEqual(11, info.data_processed)
self.assertEqual(12, info.data_remaining)
self.assertEqual(75, info.memory_total)
self.assertEqual(50, info.memory_processed)
self.assertEqual(33, info.memory_remaining)
self.assertEqual(1, info.disk_total)
self.assertEqual(2, info.disk_processed)
self.assertEqual(3, info.disk_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats_no_domain(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain with UUID blah",
fakelibvirt.VIR_ERR_NO_DOMAIN)
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_no_domain(self, mock_stats, mock_info):
mock_stats.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"virDomainGetJobStats not implemented",
fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_info.side_effect = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
"No such domain with UUID blah",
fakelibvirt.VIR_ERR_NO_DOMAIN)
info = host.DomainJobInfo.for_domain(self.dom)
self.assertIsInstance(info, host.DomainJobInfo)
self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
self.assertEqual(0, info.time_elapsed)
self.assertEqual(0, info.time_remaining)
self.assertEqual(0, info.memory_total)
self.assertEqual(0, info.memory_processed)
self.assertEqual(0, info.memory_remaining)
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_stats_operation_invalid(self, mock_stats, mock_info):
    """jobStats raising VIR_ERR_OPERATION_INVALID (domain not running)
    yields a zeroed 'completed' stub without falling back to jobInfo."""
    mock_stats.side_effect = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        "Domain is not running",
        fakelibvirt.VIR_ERR_OPERATION_INVALID)

    info = host.DomainJobInfo.for_domain(self.dom)

    self.assertIsInstance(info, host.DomainJobInfo)
    self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
    for attr in ("time_elapsed", "time_remaining", "memory_total",
                 "memory_processed", "memory_remaining"):
        self.assertEqual(0, getattr(info, attr))
    mock_stats.assert_called_once_with()
    self.assertFalse(mock_info.called)
@mock.patch.object(fakelibvirt.virDomain, "jobInfo")
@mock.patch.object(fakelibvirt.virDomain, "jobStats")
def test_job_info_operation_invalid(self, mock_stats, mock_info):
    """With jobStats unsupported, the jobInfo fallback hitting
    VIR_ERR_OPERATION_INVALID still yields a zeroed 'completed' stub."""
    mock_stats.side_effect = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        "virDomainGetJobStats not implemented",
        fakelibvirt.VIR_ERR_NO_SUPPORT)
    mock_info.side_effect = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        "Domain is not running",
        fakelibvirt.VIR_ERR_OPERATION_INVALID)

    info = host.DomainJobInfo.for_domain(self.dom)

    self.assertIsInstance(info, host.DomainJobInfo)
    self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_COMPLETED, info.type)
    for attr in ("time_elapsed", "time_remaining", "memory_total",
                 "memory_processed", "memory_remaining"):
        self.assertEqual(0, getattr(info, attr))
    mock_stats.assert_called_once_with()
    mock_info.assert_called_once_with()
| |
from __future__ import absolute_import
import json
from django.db.transaction import non_atomic_requests
from django.http import HttpResponse
from rest_framework.generics import ListAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView
from mkt.api.paginator import ESPaginator
from mkt.api.permissions import AnyOf, GroupPermission
from mkt.operators.permissions import IsOperatorPermission
from mkt.search.forms import ApiSearchForm
from mkt.search.indexers import BaseIndexer
from mkt.search.filters import (DeviceTypeFilter, OpenMobileACLFilter,
ProfileFilter, PublicContentFilter,
PublicSearchFormFilter, RegionFilter,
SearchQueryFilter, SortingFilter,
ValidAppsFilter)
from mkt.search.serializers import DynamicSearchSerializer
from mkt.search.utils import Search
from mkt.translations.helpers import truncate
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.serializers import (ESAppSerializer, RocketbarESAppSerializer,
RocketbarESAppSerializerV2,
SuggestionsESAppSerializer)
from mkt.websites.indexers import WebsiteIndexer
from mkt.websites.serializers import ESWebsiteSerializer
class SearchView(CORSMixin, MarketplaceView, ListAPIView):
    """
    Base app search view based on a single-string query.
    """
    cors_allowed_methods = ['get']
    authentication_classes = [RestSharedSecretAuthentication,
                              RestOAuthAuthentication]
    permission_classes = [AllowAny]
    serializer_class = ESAppSerializer
    form_class = ApiSearchForm
    paginator_class = ESPaginator
    filter_backends = [DeviceTypeFilter, ProfileFilter, PublicContentFilter,
                       PublicSearchFormFilter, RegionFilter, SearchQueryFilter,
                       SortingFilter]

    def get_queryset(self):
        """Return the base Elasticsearch query over the webapp index."""
        return WebappIndexer.search()

    @classmethod
    def as_view(cls, **kwargs):
        # Search views never write to the database, so skip the implicit
        # per-request transaction wrapping entirely.
        return non_atomic_requests(super(SearchView, cls).as_view(**kwargs))
class MultiSearchView(SearchView):
    """
    Search View capable of returning multiple content types in the same
    results list (e.g., apps + sites). Can take a `doc_type` param to filter by
    `app`s only or `site`s only.
    """
    serializer_class = DynamicSearchSerializer

    def _pick_for_doc_type(self, app_value, site_value):
        # Shared ?doc_type= dispatch; this logic was previously duplicated
        # in _get_doc_types() and _get_indices().
        doc_type = self.request.GET.get('doc_type', 'all')
        if doc_type == 'webapp':
            return [app_value]
        elif doc_type == 'website':
            return [site_value]
        # Default to all content types.
        return [app_value, site_value]

    def _get_doc_types(self):
        # ES mapping type names to query, narrowed by ?doc_type=.
        return self._pick_for_doc_type(
            WebappIndexer.get_mapping_type_name(),
            WebsiteIndexer.get_mapping_type_name())

    def _get_indices(self):
        # ES index names to query, narrowed by ?doc_type=.
        return self._pick_for_doc_type(WebappIndexer.get_index(),
                                       WebsiteIndexer.get_index())

    def get_serializer_context(self):
        # Map each doc type to its serializer so DynamicSearchSerializer
        # can pick the right one per hit.
        context = super(MultiSearchView, self).get_serializer_context()
        context['serializer_classes'] = {
            'webapp': ESAppSerializer,
            'website': ESWebsiteSerializer
        }
        return context

    def get_queryset(self):
        # Exclude every field either indexer marks as hidden from _source.
        excluded_fields = list(set(WebappIndexer.hidden_fields +
                                   WebsiteIndexer.hidden_fields))
        return (Search(using=BaseIndexer.get_es(),
                       index=self._get_indices(),
                       doc_type=self._get_doc_types())
                .extra(_source={'exclude': excluded_fields}))
class FeaturedSearchView(SearchView):
    """Search view that also carries the legacy rocketfuel response keys."""

    def list(self, request, *args, **kwargs):
        base_response = super(FeaturedSearchView, self).list(
            request, *args, **kwargs)
        return Response(self.add_featured_etc(request, base_response.data))

    def add_featured_etc(self, request, data):
        # Rocketfuel collections were retired in favour of the feed; the
        # three keys are kept as empty lists for backwards-compatibility.
        for legacy_key in ('collections', 'featured', 'operator'):
            data[legacy_key] = []
        return data
class SuggestionsView(SearchView):
    """Anonymous OpenSearch-style suggestions built from app search hits."""
    authentication_classes = []
    serializer_class = SuggestionsESAppSerializer

    def list(self, request, *args, **kwargs):
        query = request.GET.get('q', '')
        response = super(SuggestionsView, self).list(request, *args, **kwargs)
        hits = response.data['objects']
        names = [hit['name'] for hit in hits]
        descs = [truncate(hit['description']) for hit in hits]
        urls = [hit['absolute_url'] for hit in hits]
        icons = [hit['icon'] for hit in hits]
        # Returning a bare JSON list is usually a bad idea, but nothing here
        # is user-specific — the payload is fully anonymous, so we're fine.
        return HttpResponse(json.dumps([query, names, descs, urls, icons]),
                            content_type='application/x-suggestions+json')
class NonPublicSearchView(SearchView):
    """
    A search view that allows searching for apps with non-public statuses
    protected behind a permission class. Region exclusions still affect
    results.
    """
    # Anonymous access is not allowed: the 'Feed:Curate' group permission
    # replaces the base class's AllowAny.
    authentication_classes = [RestSharedSecretAuthentication,
                              RestOAuthAuthentication]
    permission_classes = [GroupPermission('Feed', 'Curate')]
    # ValidAppsFilter replaces PublicContentFilter so non-public (but valid)
    # apps show up; RegionFilter is kept, so exclusions still apply.
    filter_backends = [SearchQueryFilter, PublicSearchFormFilter,
                       ValidAppsFilter, DeviceTypeFilter, RegionFilter,
                       ProfileFilter, SortingFilter]
class NoRegionSearchView(SearchView):
    """
    A search view that allows searching for public apps regardless of region
    exclusions, protected behind a permission class.

    A special class is needed because when RegionFilter is included, as it is
    in the default SearchView, it will always use whatever region was set on
    the request, and we default to setting restofworld when no region is
    passed.
    """
    # Restricted to curators and operators; no anonymous access.
    authentication_classes = [RestSharedSecretAuthentication,
                              RestOAuthAuthentication]
    permission_classes = [AnyOf(GroupPermission('Feed', 'Curate'),
                                GroupPermission('OperatorDashboard', '*'),
                                IsOperatorPermission)]
    # RegionFilter is deliberately omitted (see class docstring).
    filter_backends = [SearchQueryFilter, PublicSearchFormFilter,
                       PublicContentFilter, DeviceTypeFilter,
                       ProfileFilter, SortingFilter]
class RocketbarView(SearchView):
    """App-name completion endpoint backed by the ES suggest API."""
    cors_allowed_methods = ['get']
    authentication_classes = []
    permission_classes = [AllowAny]
    serializer_class = RocketbarESAppSerializer

    def get(self, request, *args, **kwargs):
        # NOTE(review): 'limit' arrives as a string from the query string;
        # presumably ES coerces it to an int — confirm.
        es_query = {
            'apps': {
                'completion': {
                    'field': 'name_suggest',
                    'size': request.GET.get('limit', 5),
                },
                'text': request.GET.get('q', '').strip(),
            }
        }
        results = WebappIndexer.get_es().suggest(
            body=es_query, index=WebappIndexer.get_index())
        data = results['apps'][0]['options'] if 'apps' in results else []
        serializer = self.get_serializer(data)
        # This returns a JSON list. Usually this is a bad idea for security
        # reasons, but we don't include any user-specific data, it's fully
        # anonymous, so we're fine.
        return HttpResponse(json.dumps(serializer.data),
                            content_type='application/x-rocketbar+json')
class RocketbarViewV2(RocketbarView):
    """Same completion query as RocketbarView, but with the v2 serializer."""
    serializer_class = RocketbarESAppSerializerV2
class OpenMobileACLSearchView(SearchView):
    """
    A search view designed to find all valid apps using the Openmobile ACL
    feature flag. Region exclusions are ignored. The consumer pages will use
    that to verify the user has at least one app installed that belongs to
    that list before trying to install an ACL.

    It returns a list of manifest URLs directly, without pagination.
    """
    filter_backends = [ValidAppsFilter, OpenMobileACLFilter]

    def get_queryset(self):
        # Only the manifest_url field is needed in the response.
        base_qs = super(OpenMobileACLSearchView, self).get_queryset()
        return base_qs.extra(_source={'include': ['manifest_url']})

    def get(self, request, *args, **kwargs):
        hits = self.filter_queryset(self.get_queryset()).execute().hits
        manifest_urls = [hit['manifest_url'] for hit in hits]
        # This returns a JSON list. Usually this is a bad idea for security
        # reasons, but we don't include any user-specific data, it's fully
        # anonymous, so we're fine.
        return HttpResponse(json.dumps(manifest_urls),
                            content_type='application/json')
| |
import argparse
import json
import signal
import sys
import logging
import mygplus
import mytwitter
import myfb
from apscheduler.schedulers.blocking import BlockingScheduler
# Configure the root logger with defaults so library log output is visible.
logging.basicConfig()

# Cross-run state: loaded from the JSON config file in main(), updated by
# fetch_and_post(), and persisted back to disk by signal_handler() on exit.
LAST_POST_ID = ''
FB_EXTENDED_TOKEN = ''
FB_EXTENDED_TOKEN_TIME = 0
FB_EXTENDED_TOKEN_FLAG = 0
def parseArgs():
    """Parse command-line arguments.

    The single positional argument `conf` is the path to the JSON
    configuration file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('conf')
    return parser.parse_args()
def parse_gplus_data(one_activity):
    '''Extract post data from a single Google+ activity dict.

    Returns [title, url, url_image, created_time, updated_time], where
    url_image is '' whenever no usable attachment image exists.

    Fixes vs. the previous version: an unknown attachment objectType no
    longer raises a KeyError (the old `type_of_post[attachment_type]`
    lookup was unguarded), and missing 'image'/'thumbnails' sub-dicts on
    video/album attachments no longer crash.
    '''
    title = one_activity['title']
    url = one_activity['url']
    url_image = ''
    created_time = one_activity['published']
    updated_time = one_activity['updated']
    activity_object = one_activity['object']

    object_type = activity_object.get('objectType', 'NULL')
    print("Object type is --- {}".format(object_type))
    if object_type == 'activity':
        # A reshare: prefix the sharer's annotation to the title if present.
        if one_activity.get('annotation', '') == '':
            print('No annotation found, doing nothing')
        else:
            title = ' '.join([one_activity['annotation'], '-', title])
            print('Annotation found, adding as prefix to title')

    attachments = activity_object.get('attachments', '')
    if attachments == '':
        print("No attachments found !")
    else:
        attachment = attachments[0]
        attachment_type = attachment.get('objectType', 'NULL')
        print("Attachment type is --- {}".format(attachment_type))
        print("Attachments, found, trying to get an image...")

        if attachment.get('url', '') == '':
            print("No url found to add to title")
        else:
            print("URL found, appending to title")
            title = ' '.join([title, attachment.get('url', '')])

        # Pick the image URL based on attachment type; unknown types fall
        # through to the article/photo fullImage lookup.
        if attachment_type == 'video':
            # Video thumbnails live under 'image'.
            video_image = attachment.get('image', '')
            if video_image == '':
                print("Attached image NOT found, using empty url")
            else:
                url_image = video_image['url']
                print("Attached image found")
        elif attachment_type == 'album':
            # Albums expose a list of thumbnails; use the first one.
            thumbnails = attachment.get('thumbnails', '')
            if thumbnails == '':
                print("Attached image NOT found, using empty url")
            else:
                url_image = thumbnails[0]['image']['url']
                print("Attached image found")
        else:
            # article/photo and everything else use 'fullImage'.
            if attachment.get('fullImage', '') == '':
                print("full image not found")
            else:
                url_image = attachment['fullImage']['url']
                print("full image found")
    return [title, url, url_image, created_time, updated_time]
def fetch_and_post(gplus_user_id):
    '''Fetch new Google+ posts and cross-post them to Twitter and Facebook.

    Side effects: updates the LAST_POST_ID and FB_EXTENDED_TOKEN* module
    globals so the scheduler and the shutdown handler see fresh state.

    Fixes vs. the previous version: a failed Google+ fetch now returns
    early (previously `my_activites` was undefined and the next line raised
    a NameError), and an empty result list no longer raises an IndexError.
    '''
    global LAST_POST_ID
    global FB_EXTENDED_TOKEN
    global FB_EXTENDED_TOKEN_TIME
    global FB_EXTENDED_TOKEN_FLAG
    print("LAST_POST_ID is - {}".format(LAST_POST_ID))
    try:
        my_gplus = mygplus.MyGPlus(gplus_user_id)
        my_activites = my_gplus.get_gplus_posts(LAST_POST_ID)
    except Exception as errObj:
        print("Error fetching posts from Google+")
        print(errObj)
        # Bail out: my_activites is undefined on failure, so falling
        # through would raise a NameError below.
        return
    print("Got my_activites - {}".format(len(my_activites)))
    if not my_activites:
        # Nothing fetched, nothing to post.
        return
    if LAST_POST_ID == my_activites[-1]['id']:
        print("LAST_POST_ID is the same as before - {}".format(LAST_POST_ID))
        print("Not posting again till new posts arrive\n")
        return
    else:
        LAST_POST_ID = my_activites[-1]['id']
        print("Last post id updated to - {}".format(LAST_POST_ID))
    my_twitter = mytwitter.MyTwitter()
    print("Extended token is --- {}".format(FB_EXTENDED_TOKEN))
    print("Extended time is --- {}".format(FB_EXTENDED_TOKEN_TIME))
    print("Extended FLAG is --- {}".format(FB_EXTENDED_TOKEN_FLAG))
    my_fb = myfb.MyFB(FB_EXTENDED_TOKEN, FB_EXTENDED_TOKEN_TIME,
                      FB_EXTENDED_TOKEN_FLAG)
    for activity in my_activites:
        print("\n--------STARTING --------")
        title, url, url_image, created_time, updated_time = \
            parse_gplus_data(activity)
        print(title)
        print(url)
        print(url_image)
        # Posting is best-effort per network: a failure on one service
        # must not stop the other, or the remaining activities.
        try:
            my_twitter.post_to_twitter(title, url, url_image)
            print("Twitter post done")
        except Exception as errObj:
            print("Error posting to Twitter")
            print(errObj)
        try:
            my_fb.post_to_fb(title, url, url_image, created_time,
                             updated_time)
            print("facebook post done")
        except Exception as errObj:
            print("Error posting to Facebook")
            print(errObj)
            print(sys.exc_info())
            print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))
        finally:
            print("finally done")
        print("--------ENDING --------\n")
    # Persist any token refresh Facebook performed while posting.
    FB_EXTENDED_TOKEN, FB_EXTENDED_TOKEN_TIME, FB_EXTENDED_TOKEN_FLAG = \
        my_fb.get_extended_token()
def main():
    '''Entry point: load the config, post once immediately, then keep
    re-posting on a fixed interval via a blocking scheduler.'''
    global LAST_POST_ID
    global FB_EXTENDED_TOKEN
    global FB_EXTENDED_TOKEN_TIME
    global FB_EXTENDED_TOKEN_FLAG

    config_file = parseArgs().conf
    with open(config_file) as json_file:
        config = json.load(json_file)

    FB_EXTENDED_TOKEN = config['facebook_extended_token']
    FB_EXTENDED_TOKEN_TIME = config['facebook_extended_token_time']
    FB_EXTENDED_TOKEN_FLAG = config['facebook_extended_token_flag']
    LAST_POST_ID = config['last_post_id']
    print("Last post id from args is - {}".format(LAST_POST_ID))

    # Run once right away so the first post doesn't wait a full interval.
    gplus_user_id = config['gplus_user_id']
    fetch_and_post(gplus_user_id)

    scheduler = BlockingScheduler(timezone='UTC')
    scheduler.add_job(
        fetch_and_post,
        'interval',
        minutes=int(config['interval']),
        args=[gplus_user_id],
        id='posts'
    )
    scheduler.start()
def signal_handler(signal, frame):
    '''On SIGINT/SIGTERM, persist the current posting state back to the
    config file, then exit cleanly.'''
    config_file = parseArgs().conf
    print("\nWriting last_post_id - {} to {}".format(LAST_POST_ID, config_file))
    with open(config_file) as json_file:
        json_data = json.load(json_file)
    json_data['last_post_id'] = LAST_POST_ID
    json_data['facebook_extended_token'] = FB_EXTENDED_TOKEN
    json_data['facebook_extended_token_time'] = FB_EXTENDED_TOKEN_TIME
    # NOTE(review): FB_EXTENDED_TOKEN_FLAG is read in main() but never
    # written back here — confirm whether that is intentional.
    with open(config_file, 'w') as json_file:
        json.dump(json_data, json_file, indent=4)
    print("\nCaught signal - {} and frame - {}".format(signal, frame))
    print("Quitting...")
    sys.exit(0)
if __name__ == '__main__':
    # Persist state (see signal_handler) on Ctrl-C or termination.
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.